content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
'''
use case handlers package
'''
from .find_average_temperature_handler import FindAverageTemperatureHandler
# Explicit public API of this package: only the handler is re-exported.
__all__ = [
'FindAverageTemperatureHandler'
]
| core/handlers/__init__.py | 161 | use case handlers package | 25 | en | 0.364844 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
import re
import sys
from importlib import import_module
# Repository root: three directory levels up from this test file.
PROJECT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
# Top-level packages that live inside this repository (never pip-installable requirements).
CUSTOM_MODULES = set(['arch', 'eggroll', 'federatedml', 'fate_flow'])
# Modules vendored as source in the tree; their import lines are skipped entirely.
USE_SOURCE_MODULES = set(['antlr4', 'mocks', 'TestTokenStreamRewriter'])
class DummyConfig(object):
    """Minimal stand-in for a Sphinx ``app.config`` object.

    Carries just the attributes that ``sphinx.ext.intersphinx.fetch_inventory``
    reads: the intersphinx settings and the TLS-verification flag.
    """
    def __init__(self, intersphinx_mapping=None, intersphinx_cache_limit=5, intersphinx_timeout=None):
        if intersphinx_mapping:
            self.intersphinx_mapping = intersphinx_mapping
        else:
            # any falsy mapping (None, {}) becomes a fresh empty dict
            self.intersphinx_mapping = {}
        self.intersphinx_cache_limit = intersphinx_cache_limit
        self.intersphinx_timeout = intersphinx_timeout
        self.tls_verify = True
class DummyApp(object):
    # Minimal stand-in for a Sphinx application object: fetch_inventory() only
    # needs something exposing a ``config`` attribute with intersphinx settings.
    def __init__(self):
        self.config = DummyConfig()
def get_python_standard_modules(version=None):
    """Return the standard-library module names for a CPython version.

    The names are fetched from the intersphinx inventory on docs.python.org
    and cached in a local ``python<X>_<Y>_modules.csv`` file (one name per
    line) so that subsequent runs need no network access.

    :param version: "X.Y" version string; defaults to the running interpreter.
    :return: list of module names (sorted when freshly fetched; cache order
             when read back from disk).
    """
    if not version:
        version = '{}.{}'.format(sys.version_info[0], sys.version_info[1])
    module_cache_file = 'python{}_modules.csv'.format(version.replace('.', '_'))
    if os.path.exists(module_cache_file):
        print('read python {} standard modules'.format(version))
        with open(module_cache_file, 'r') as fr:
            # one module name per line
            modules = [line.strip() for line in fr]
    else:
        from sphinx.ext.intersphinx import fetch_inventory
        print('fetch python {} standard modules'.format(version))
        # use https directly: docs.python.org redirects plain http, which some
        # fetchers refuse to follow
        url = "https://docs.python.org/{}/objects.inv".format(version)
        modules = sorted(fetch_inventory(DummyApp(), "", url).get("py:module").keys())
        with open(module_cache_file, 'w') as fw:
            fw.write('\n'.join(modules))
    return modules
def search_require_modules(project_dir):
    """Scan every .py file under *project_dir* for top-level import statements.

    Generated protobuf files (``*_pb2.py``, ``*_pb2_grpc.py``) and
    ``workflow.py`` are excluded from the scan. Standard-library and in-repo
    modules are filtered out of the requirement set.

    :param project_dir: directory tree to scan
    :return: tuple (require_modules, require_lines, all_imports) where
             require_modules is the set of third-party top-level package
             names, require_lines maps each of them to one example import
             line, and all_imports is the set of every import line seen.
    """
    # NOTE: patterns need '.*' (a bare leading '*' is an invalid ERE, so the
    # original '*_pb2\.py' patterns never actually excluded generated files)
    grep_cmd = "find {} -name '*.py' | grep -v -E '.*_pb2\.py' | grep -v -E '.*_pb2_grpc\.py' | grep -v -E 'workflow\.py' | xargs -n1 cat | grep -E '^import|^from'".format(project_dir)
    print(grep_cmd)
    p = subprocess.Popen(grep_cmd,
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT)
    stdout, stderr = p.communicate()
    import_lines = stdout.decode('utf-8').strip().split('\n')
    python_standard_modules = get_python_standard_modules('3.6')
    require_modules = set()
    require_lines = dict()
    all_imports = set()
    for line in import_lines:
        # first dotted path after 'import '/'from '; renamed so we do not
        # shadow importlib.import_module imported at module level
        imported_path = re.sub('^import |^from ', '', line).split(' ')[0].strip()
        top_level = imported_path.split('.')[0]
        if len(top_level) == 0:
            continue
        # 'import a,b' style lines name several modules at once
        candidates = top_level.split(',') if ',' in top_level else [top_level]
        for r_m in candidates:
            if r_m.startswith('.'):  # relative import
                continue
            if r_m.endswith('_pb2'):  # generated protobuf module
                continue
            if r_m in USE_SOURCE_MODULES:
                continue
            all_imports.add(line.strip())
            if r_m in python_standard_modules:
                continue
            if r_m in CUSTOM_MODULES:
                continue
            require_modules.add(r_m)
            require_lines[r_m] = line.strip()
    return require_modules, require_lines, all_imports
def conda_env_install(module):
    """Install *module* with conda, retrying via the conda-forge channel on failure.

    :param module: package name to install
    :return: return code of the last conda invocation (0 on success)
    """
    print('try install: {}'.format(module))
    proc = subprocess.Popen('conda install -y {}'.format(module),
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    proc.communicate()
    if proc.returncode == 0:
        return proc.returncode
    # first attempt failed: retry from the conda-forge channel
    print('try install again: {}'.format(module))
    proc = subprocess.Popen('conda install -c conda-forge -y {}'.format(module),
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    proc.communicate()
    return proc.returncode
def pip_env_install(module):
    """Install *module* with pip and return pip's exit status (0 on success)."""
    print('try install: {}'.format(module))
    proc = subprocess.Popen('pip install {}'.format(module),
                            shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    proc.communicate()
    return proc.returncode
def try_import(module):
    """Try to import *module*; attempt a pip install if the import fails.

    :param module: dotted module name
    :return: 0 if the module imports cleanly, 1 if it was missing but pip
             installed it successfully, 2 if installation also failed.
    """
    try:
        import_module(module)
        return 0
    except Exception:
        # deliberately broad: a module whose top-level code crashes should
        # also trigger an install attempt
        return 1 if pip_env_install(module) == 0 else 2
def check_require(require_modules, require_lines):
    """Try to import every module in *require_modules*, installing missing ones
    on demand, and report the outcome of each install attempt.

    :param require_modules: iterable of top-level module names
    :param require_lines: mapping module name -> example import line (for the report)
    """
    for module in require_modules:
        status = try_import(module)
        if status == 1:
            print('installed {}: {}\n'.format(module, require_lines[module]))
        elif status == 2:
            print('failed installed {}: {}\n'.format(module, require_lines[module]))
def check_import(all_imports):
    """Execute each import statement in a subprocess and collect the modules
    that fail with ModuleNotFoundError.

    :param all_imports: iterable of import statement strings
    :return: tuple (dependent_modules, dependent_lines) -- top-level names of
             missing non-repo modules and an example import line for each
    """
    dependent_modules = set()
    dependent_lines = dict()
    for import_code in all_imports:
        proc = subprocess.Popen("python -c '{}'".format(import_code),
                                shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        out, _ = proc.communicate()
        if proc.returncode == 0:
            continue
        # import error: parse the missing module name out of the traceback
        for line in out.decode('utf-8').strip().split('\n'):
            if not line.startswith('ModuleNotFoundError:'):
                continue
            missing = line.strip().split(' ')[-1].strip("'").split('.')[0]
            print('{}: {}'.format(missing, import_code))
            # in-repo modules are expected to be unimportable here; skip them
            if missing not in CUSTOM_MODULES:
                dependent_modules.add(missing)
                dependent_lines[missing] = import_code
    return dependent_modules, dependent_lines
if __name__ == '__main__':
    # Pass 1: statically scan the tree for import lines and try to satisfy
    # every third-party requirement found.
    print('project dir is: {}'.format(PROJECT_DIR))
    print('start search import')
    require_modules, require_lines, all_imports = search_require_modules(PROJECT_DIR)
    print()
    print('has {} require modules'.format(len(require_modules)))
    print(require_modules)
    print()
    check_require(require_modules=require_modules, require_lines=require_lines)
    print()
    # Pass 2: actually execute every import line in a subprocess to catch
    # modules the static scan missed, then try to install those as well.
    dependent_modules, dependent_lines = check_import(all_imports=all_imports)
    print()
    require_modules.update(dependent_modules)
    require_lines.update(dependent_lines)
    check_require(require_modules=require_modules, require_lines=require_lines)
    print()
| python/fate_flow/tests/check_fate_python_requirement.py | 7,487 | Copyright 2019 The FATE Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. import error code error | 608 | en | 0.850051 |
import itertools
import logging
import warnings
from abc import abstractmethod
from collections import Counter
from pathlib import Path
from typing import Union, List, Tuple, Dict, Optional
import torch.nn
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
import flair
from flair import file_utils
from flair.data import DataPoint, Sentence, Dictionary
from flair.datasets import DataLoader, SentenceDataset
from flair.training_utils import Result, store_embeddings
log = logging.getLogger("flair")
class Model(torch.nn.Module):
    """Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
    Every new type of model must implement these methods."""
    @property
    @abstractmethod
    def label_type(self):
        """Each model predicts labels of a certain type. TODO: can we find a better name for this?"""
        raise NotImplementedError
    @abstractmethod
    def forward_loss(self, data_points: Union[List[DataPoint], DataPoint]) -> torch.tensor:
        """Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training."""
        raise NotImplementedError
    @abstractmethod
    def evaluate(
        self,
        sentences: Union[List[Sentence], Dataset],
        gold_label_type: str,
        out_path: Union[str, Path] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        num_workers: int = 8,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
    ) -> Result:
        """Evaluates the model. Returns a Result object containing evaluation
        results and a loss value. Implement this to enable evaluation.
        :param sentences: dataset (or list of sentences) to evaluate on
        :param gold_label_type: name of the gold label type to evaluate against
        :param out_path: Optional output path to store predictions
        :param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and
        freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU
        :return: Returns a Tuple consisting of a Result object and a loss float value
        """
        raise NotImplementedError
    @abstractmethod
    def _get_state_dict(self):
        """Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()
        functionality."""
        raise NotImplementedError
    @staticmethod
    @abstractmethod
    def _init_model_with_state_dict(state):
        """Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()
        functionality."""
        raise NotImplementedError
    @staticmethod
    def _fetch_model(model_name) -> str:
        # base implementation treats the name as an already-loadable path;
        # subclasses override this to resolve named models
        return model_name
    def save(self, model_file: Union[str, Path], checkpoint: bool = False):
        """
        Saves the current model to the provided file.
        :param model_file: the model file
        :param checkpoint: if True, optimizer and scheduler state dicts are stored in the model card as well
        """
        model_state = self._get_state_dict()
        # in Flair <0.9.1, optimizer and scheduler used to train model are not saved
        optimizer = scheduler = None
        # write out a "model card" if one is set
        if hasattr(self, 'model_card'):
            # special handling for optimizer: remember optimizer class and state dictionary
            if 'training_parameters' in self.model_card:
                training_parameters = self.model_card['training_parameters']
                if 'optimizer' in training_parameters:
                    optimizer = training_parameters['optimizer']
                    if checkpoint:
                        training_parameters['optimizer_state_dict'] = optimizer.state_dict()
                    # store only the class in the saved card; the live object is restored below
                    training_parameters['optimizer'] = optimizer.__class__
                if 'scheduler' in training_parameters:
                    scheduler = training_parameters['scheduler']
                    if checkpoint:
                        with warnings.catch_warnings():
                            warnings.simplefilter("ignore")
                            training_parameters['scheduler_state_dict'] = scheduler.state_dict()
                    # store only the class in the saved card; the live object is restored below
                    training_parameters['scheduler'] = scheduler.__class__
            model_state['model_card'] = self.model_card
        # save model
        torch.save(model_state, str(model_file), pickle_protocol=4)
        # restore optimizer and scheduler to model card if set
        if optimizer:
            self.model_card['training_parameters']['optimizer'] = optimizer
        if scheduler:
            self.model_card['training_parameters']['scheduler'] = scheduler
    @classmethod
    def load(cls, model: Union[str, Path]):
        """
        Loads the model from the given file.
        :param model: the model file
        :return: the loaded text classifier model
        """
        model_file = cls._fetch_model(str(model))
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            # load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups
            # see https://github.com/zalandoresearch/flair/issues/351
            f = file_utils.load_big_file(str(model_file))
            state = torch.load(f, map_location='cpu')
        model = cls._init_model_with_state_dict(state)
        if 'model_card' in state:
            model.model_card = state['model_card']
        model.eval()
        model.to(flair.device)
        return model
    def print_model_card(self):
        # log the "model card" (training metadata) if this model carries one
        if hasattr(self, 'model_card'):
            param_out = "\n------------------------------------\n"
            param_out += "--------- Flair Model Card ---------\n"
            param_out += "------------------------------------\n"
            param_out += "- this Flair model was trained with:\n"
            param_out += f"-- Flair version {self.model_card['flair_version']}\n"
            param_out += f"-- PyTorch version {self.model_card['pytorch_version']}\n"
            if 'transformers_version' in self.model_card:
                param_out += f"-- Transformers version {self.model_card['transformers_version']}\n"
            param_out += "------------------------------------\n"
            param_out += "------- Training Parameters: -------\n"
            param_out += "------------------------------------\n"
            training_params = '\n'.join(f'-- {param} = {self.model_card["training_parameters"][param]}'
                                        for param in self.model_card['training_parameters'])
            param_out += training_params + "\n"
            param_out += "------------------------------------\n"
            log.info(param_out)
        else:
            log.info(
                "This model has no model card (likely because it is not yet trained or was trained with Flair version < 0.9.1)")
class Classifier(Model):
    """Abstract base class for all Flair models that do classification, both single- and multi-label.
    It inherits from flair.nn.Model and adds a unified evaluate() function so that all classification models
    use the same evaluation routines and compute the same numbers.
    Currently, the SequenceTagger implements this class directly, while all other classifiers in Flair
    implement the DefaultClassifier base class which implements Classifier."""
    def evaluate(
        self,
        data_points: Union[List[DataPoint], Dataset],
        gold_label_type: str,
        out_path: Union[str, Path] = None,
        embedding_storage_mode: str = "none",
        mini_batch_size: int = 32,
        num_workers: int = 8,
        main_evaluation_metric: Tuple[str, str] = ("micro avg", "f1-score"),
        exclude_labels: List[str] = [],
        gold_label_dictionary: Optional[Dictionary] = None,
    ) -> Result:
        """Unified evaluation routine for all classifiers: predicts labels for all
        data points, aligns them with the gold labels of the given type and computes
        precision/recall/F1 via scikit-learn.
        :param data_points: dataset (or list of data points) to evaluate on
        :param gold_label_type: name of the gold label type to compare against
        :param out_path: if set, per-datapoint predictions are written to this file
        :param embedding_storage_mode: one of 'none', 'cpu' or 'gpu'
        :param mini_batch_size: batch size used during prediction
        :param num_workers: number of DataLoader workers
        :param main_evaluation_metric: (average type, metric name) reported as main score
        :param exclude_labels: label names to leave out of the report
        :param gold_label_dictionary: if given, gold labels unknown to it count as '<unk>'
        :return: Result object with scores, detailed report and evaluation loss
        """
        import numpy as np
        import sklearn
        # read Dataset into data loader (if list of sentences passed, make Dataset first)
        if not isinstance(data_points, Dataset):
            data_points = SentenceDataset(data_points)
        data_loader = DataLoader(data_points, batch_size=mini_batch_size, num_workers=num_workers)
        with torch.no_grad():
            # loss calculation
            eval_loss = 0
            average_over = 0
            # variables for printing
            lines: List[str] = []
            # variables for computing scores
            all_spans: List[str] = []
            all_true_values = {}
            all_predicted_values = {}
            sentence_id = 0
            for batch in data_loader:
                # remove any previously predicted labels
                for datapoint in batch:
                    datapoint.remove_labels('predicted')
                # predict for batch
                loss_and_count = self.predict(batch,
                                              embedding_storage_mode=embedding_storage_mode,
                                              mini_batch_size=mini_batch_size,
                                              label_name='predicted',
                                              return_loss=True)
                # NOTE(review): isinstance against typing.Tuple is deprecated in newer
                # Pythons; it behaves like isinstance(..., tuple) here
                if isinstance(loss_and_count, Tuple):
                    average_over += loss_and_count[1]
                    eval_loss += loss_and_count[0]
                else:
                    eval_loss += loss_and_count
                # get the gold labels
                for datapoint in batch:
                    for gold_label in datapoint.get_labels(gold_label_type):
                        # spans are keyed by "<sentence id>: <label identifier>"
                        representation = str(sentence_id) + ': ' + gold_label.identifier
                        value = gold_label.value
                        if gold_label_dictionary and gold_label_dictionary.get_idx_for_item(value) == 0:
                            value = '<unk>'
                        if representation not in all_true_values:
                            all_true_values[representation] = [value]
                        else:
                            all_true_values[representation].append(value)
                        if representation not in all_spans:
                            all_spans.append(representation)
                    for predicted_span in datapoint.get_labels("predicted"):
                        representation = str(sentence_id) + ': ' + predicted_span.identifier
                        # add to all_predicted_values
                        if representation not in all_predicted_values:
                            all_predicted_values[representation] = [predicted_span.value]
                        else:
                            all_predicted_values[representation].append(predicted_span.value)
                        if representation not in all_spans:
                            all_spans.append(representation)
                    sentence_id += 1
                store_embeddings(batch, embedding_storage_mode)
                # make printout lines
                if out_path:
                    lines.extend(self._print_predictions(batch, gold_label_type))
        # write all_predicted_values to out_file if set
        if out_path:
            with open(Path(out_path), "w", encoding="utf-8") as outfile:
                outfile.write("".join(lines))
        # make the evaluation dictionary
        evaluation_label_dictionary = Dictionary(add_unk=False)
        evaluation_label_dictionary.add_item("O")
        for true_values in all_true_values.values():
            for label in true_values:
                evaluation_label_dictionary.add_item(label)
        for predicted_values in all_predicted_values.values():
            for label in predicted_values:
                evaluation_label_dictionary.add_item(label)
        # finally, compute numbers
        y_true = []
        y_pred = []
        for span in all_spans:
            # spans with no gold (or no predicted) value count as the 'O' class
            true_values = all_true_values[span] if span in all_true_values else ['O']
            predicted_values = all_predicted_values[span] if span in all_predicted_values else ['O']
            y_true_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
            for true_value in true_values:
                y_true_instance[evaluation_label_dictionary.get_idx_for_item(true_value)] = 1
            y_true.append(y_true_instance.tolist())
            y_pred_instance = np.zeros(len(evaluation_label_dictionary), dtype=int)
            for predicted_value in predicted_values:
                y_pred_instance[evaluation_label_dictionary.get_idx_for_item(predicted_value)] = 1
            y_pred.append(y_pred_instance.tolist())
        # now, calculate evaluation numbers
        target_names = []
        labels = []
        counter = Counter()
        counter.update(list(itertools.chain.from_iterable(all_true_values.values())))
        counter.update(list(itertools.chain.from_iterable(all_predicted_values.values())))
        for label_name, count in counter.most_common():
            if label_name == 'O': continue
            if label_name in exclude_labels: continue
            target_names.append(label_name)
            labels.append(evaluation_label_dictionary.get_idx_for_item(label_name))
        # there is at least one gold label or one prediction (default)
        if len(all_true_values) + len(all_predicted_values) > 1:
            classification_report = sklearn.metrics.classification_report(
                y_true, y_pred, digits=4, target_names=target_names, zero_division=0, labels=labels,
            )
            classification_report_dict = sklearn.metrics.classification_report(
                y_true, y_pred, target_names=target_names, zero_division=0, output_dict=True, labels=labels,
            )
            accuracy_score = round(sklearn.metrics.accuracy_score(y_true, y_pred), 4)
            precision_score = round(classification_report_dict["micro avg"]["precision"], 4)
            recall_score = round(classification_report_dict["micro avg"]["recall"], 4)
            micro_f_score = round(classification_report_dict["micro avg"]["f1-score"], 4)
            macro_f_score = round(classification_report_dict["macro avg"]["f1-score"], 4)
            main_score = classification_report_dict[main_evaluation_metric[0]][main_evaluation_metric[1]]
        else:
            # issue error and default all evaluation numbers to 0.
            log.error(
                "ACHTUNG! No gold labels and no all_predicted_values found! Could be an error in your corpus or how you "
                "initialize the trainer!")
            accuracy_score = precision_score = recall_score = micro_f_score = macro_f_score = main_score = 0.
            classification_report = ""
            classification_report_dict = {}
        detailed_result = (
            "\nResults:"
            f"\n- F-score (micro) {micro_f_score}"
            f"\n- F-score (macro) {macro_f_score}"
            f"\n- Accuracy {accuracy_score}"
            "\n\nBy class:\n" + classification_report
        )
        # line for log file
        log_header = "PRECISION\tRECALL\tF1\tACCURACY"
        log_line = f"{precision_score}\t" f"{recall_score}\t" f"{micro_f_score}\t" f"{accuracy_score}"
        if average_over > 0:
            eval_loss /= average_over
        result = Result(
            main_score=main_score,
            log_line=log_line,
            log_header=log_header,
            detailed_results=detailed_result,
            classification_report=classification_report_dict,
            loss=eval_loss
        )
        return result
    def _print_predictions(self, batch, gold_label_type):
        """Format one line per data point showing gold vs. predicted labels,
        marking mismatches; used by evaluate() when out_path is set."""
        lines = []
        for datapoint in batch:
            # check if there is a label mismatch
            g = [label.identifier + label.value for label in datapoint.get_labels(gold_label_type)]
            p = [label.identifier + label.value for label in datapoint.get_labels('predicted')]
            g.sort()
            p.sort()
            correct_string = " -> MISMATCH!\n" if g != p else ""
            # print info
            eval_line = f"{datapoint.to_original_text()}\n" \
                        f" - Gold: {datapoint.get_labels(gold_label_type)}\n" \
                        f" - Pred: {datapoint.get_labels('predicted')}\n{correct_string}\n"
            lines.append(eval_line)
        return lines
class DefaultClassifier(Classifier):
    """Default base class for all Flair models that do classification, both single- and multi-label.
    It inherits from flair.nn.Classifier and thus from flair.nn.Model. All features shared by all classifiers
    are implemented here, including the loss calculation and the predict() method.
    Currently, the TextClassifier, RelationExtractor, TextPairClassifier and SimpleSequenceTagger implement
    this class. You only need to implement the forward_pass() method to implement this base class.
    """
    def forward_pass(self,
                     sentences: Union[List[DataPoint], DataPoint],
                     return_label_candidates: bool = False,
                     ):
        """This method does a forward pass through the model given a list of data points as input.
        Returns the tuple (scores, labels) if return_label_candidates = False, where scores are a tensor of logits
        produced by the decoder and labels are the string labels for each data point.
        Returns the tuple (scores, labels, data_points, candidate_labels) if return_label_candidates = True,
        where data_points are the data points to which labels are added (commonly either Sentence or Token objects)
        and candidate_labels are empty Label objects for each prediction (depending on the task Label,
        SpanLabel or RelationLabel)."""
        raise NotImplementedError
    def __init__(self,
                 label_dictionary: Dictionary,
                 multi_label: bool = False,
                 multi_label_threshold: float = 0.5,
                 loss_weights: Dict[str, float] = None,
                 ):
        """
        :param label_dictionary: dictionary of all labels this classifier can predict
        :param multi_label: if True, a data point may receive several labels (sigmoid + BCE loss);
        otherwise exactly one (softmax + cross-entropy loss)
        :param multi_label_threshold: scalar threshold, or dict mapping label -> threshold with a 'default' key
        :param loss_weights: optional mapping of label name -> loss weight
        """
        super().__init__()
        # initialize the label dictionary
        self.label_dictionary: Dictionary = label_dictionary
        # set up multi-label logic
        self.multi_label = multi_label
        self.multi_label_threshold = multi_label_threshold
        # loss weights and loss function
        self.weight_dict = loss_weights
        # Initialize the weight tensor (weight 1.0 for any label not listed)
        if loss_weights is not None:
            n_classes = len(self.label_dictionary)
            weight_list = [1.0 for i in range(n_classes)]
            for i, tag in enumerate(self.label_dictionary.get_items()):
                if tag in loss_weights.keys():
                    weight_list[i] = loss_weights[tag]
            self.loss_weights = torch.FloatTensor(weight_list).to(flair.device)
        else:
            self.loss_weights = None
        if self.multi_label:
            self.loss_function = torch.nn.BCEWithLogitsLoss(weight=self.loss_weights)
        else:
            self.loss_function = torch.nn.CrossEntropyLoss(weight=self.loss_weights)
    @property
    def multi_label_threshold(self):
        return self._multi_label_threshold
    @multi_label_threshold.setter
    def multi_label_threshold(self, x):  # setter method
        # accept either a scalar or a per-label dict (which must carry a 'default' entry)
        if type(x) is dict:
            if 'default' in x:
                self._multi_label_threshold = x
            else:
                raise Exception('multi_label_threshold dict should have a "default" key')
        else:
            self._multi_label_threshold = {'default': x}
    def forward_loss(self, sentences: Union[List[DataPoint], DataPoint]) -> torch.tensor:
        """Run a forward pass and return the (loss, label count) tuple from _calculate_loss()."""
        scores, labels = self.forward_pass(sentences)
        return self._calculate_loss(scores, labels)
    def _calculate_loss(self, scores, labels):
        """Compute the loss for the given logits and per-datapoint gold label strings.
        Returns a (loss tensor, number of labels) tuple."""
        # no gold labels at all: return a zero loss that still participates in backprop
        if not any(labels): return torch.tensor(0., requires_grad=True, device=flair.device), 1
        if self.multi_label:
            # multi-hot encode the label set of each data point
            labels = torch.tensor([[1 if l in all_labels_for_point else 0 for l in self.label_dictionary.get_items()]
                                   for all_labels_for_point in labels], dtype=torch.float, device=flair.device)
        else:
            # single-label: index of the first gold label, 'O' if the data point has none
            labels = torch.tensor([self.label_dictionary.get_idx_for_item(label[0]) if len(label) > 0
                                   else self.label_dictionary.get_idx_for_item('O')
                                   for label in labels], dtype=torch.long, device=flair.device)
        return self.loss_function(scores, labels), len(labels)
    def predict(
        self,
        sentences: Union[List[Sentence], Sentence],
        mini_batch_size: int = 32,
        return_probabilities_for_all_classes: bool = False,
        verbose: bool = False,
        label_name: Optional[str] = None,
        return_loss=False,
        embedding_storage_mode="none",
    ):
        """
        Predicts the class labels for the given sentences. The labels are directly added to the sentences.
        :param sentences: list of sentences
        :param mini_batch_size: mini batch size to use
        :param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted
        :param verbose: set to True to display a progress bar
        :param return_loss: set to True to return loss
        :param label_name: set this to change the name of the label type that is predicted
        :param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
        you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
        'gpu' to store embeddings in GPU memory.
        """
        if label_name is None:
            label_name = self.label_type if self.label_type is not None else "label"
        with torch.no_grad():
            if not sentences:
                return sentences
            if isinstance(sentences, DataPoint):
                sentences = [sentences]
            # filter empty sentences
            if isinstance(sentences[0], DataPoint):
                sentences = [sentence for sentence in sentences if len(sentence) > 0]
            if len(sentences) == 0:
                return sentences
            # reverse sort all sequences by their length
            rev_order_len_index = sorted(range(len(sentences)), key=lambda k: len(sentences[k]), reverse=True)
            reordered_sentences: List[Union[DataPoint, str]] = [sentences[index] for index in rev_order_len_index]
            dataloader = DataLoader(dataset=SentenceDataset(reordered_sentences), batch_size=mini_batch_size)
            # progress bar for verbosity
            if verbose:
                dataloader = tqdm(dataloader)
            overall_loss = 0
            batch_no = 0
            label_count = 0
            for batch in dataloader:
                batch_no += 1
                if verbose:
                    dataloader.set_description(f"Inferencing on batch {batch_no}")
                # stop if all sentences are empty
                if not batch:
                    continue
                scores, gold_labels, data_points, label_candidates = self.forward_pass(batch,
                                                                                       return_label_candidates=True)
                # remove previously predicted labels of this type
                for sentence in data_points:
                    sentence.remove_labels(label_name)
                if return_loss:
                    overall_loss += self._calculate_loss(scores, gold_labels)[0]
                    label_count += len(label_candidates)
                # if anything could possibly be predicted
                if len(label_candidates) > 0:
                    if self.multi_label:
                        # multi-label: every class above its threshold becomes a label
                        sigmoided = torch.sigmoid(scores) # size: (n_sentences, n_classes)
                        n_labels = sigmoided.size(1)
                        for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):
                            for l_idx in range(n_labels):
                                label_value = self.label_dictionary.get_item_for_index(l_idx)
                                if label_value == 'O': continue
                                label_threshold = self._get_label_threshold(label_value)
                                label_score = sigmoided[s_idx, l_idx].item()
                                if label_score > label_threshold or return_probabilities_for_all_classes:
                                    label = label_candidate.spawn(value=label_value, score=label_score)
                                    data_point.add_complex_label(label_name, label)
                    else:
                        # single-label: either all class probabilities or only the argmax
                        softmax = torch.nn.functional.softmax(scores, dim=-1)
                        if return_probabilities_for_all_classes:
                            n_labels = softmax.size(1)
                            for s_idx, (data_point, label_candidate) in enumerate(zip(data_points, label_candidates)):
                                for l_idx in range(n_labels):
                                    label_value = self.label_dictionary.get_item_for_index(l_idx)
                                    if label_value == 'O': continue
                                    label_score = softmax[s_idx, l_idx].item()
                                    label = label_candidate.spawn(value=label_value, score=label_score)
                                    data_point.add_complex_label(label_name, label)
                        else:
                            conf, idx = torch.max(softmax, dim=-1)
                            for data_point, label_candidate, c, i in zip(data_points, label_candidates, conf, idx):
                                label_value = self.label_dictionary.get_item_for_index(i.item())
                                if label_value == 'O': continue
                                label = label_candidate.spawn(value=label_value, score=c.item())
                                data_point.add_complex_label(label_name, label)
                store_embeddings(batch, storage_mode=embedding_storage_mode)
            if return_loss:
                return overall_loss, label_count
    def _get_label_threshold(self, label_value):
        """Return the decision threshold for *label_value*: the per-label entry if
        configured, otherwise the 'default' threshold."""
        label_threshold = self.multi_label_threshold['default']
        if label_value in self.multi_label_threshold:
            label_threshold = self.multi_label_threshold[label_value]
        return label_threshold
    def __str__(self):
        # append the loss-weight configuration to the default torch module repr
        return super(flair.nn.Model, self).__str__().rstrip(')') + \
               f' (weights): {self.weight_dict}\n' + \
               f' (weight_tensor) {self.loss_weights}\n)'
| flair/nn/model.py | 27,081 | Abstract base class for all Flair models that do classification, both single- and multi-label.
It inherits from flair.nn.Model and adds a unified evaluate() function so that all classification models
use the same evaluation routines and compute the same numbers.
Currently, the SequenceTagger implements this class directly, while all other classifiers in Flair
implement the DefaultClassifier base class which implements Classifier.
Default base class for all Flair models that do classification, both single- and multi-label.
It inherits from flair.nn.Classifier and thus from flair.nn.Model. All features shared by all classifiers
are implemented here, including the loss calculation and the predict() method.
Currently, the TextClassifier, RelationExtractor, TextPairClassifier and SimpleSequenceTagger implement
this class. You only need to implement the forward_pass() method to implement this base class.
Abstract base class for all downstream task models in Flair, such as SequenceTagger and TextClassifier.
Every new type of model must implement these methods.
Returns the state dictionary for this model. Implementing this enables the save() and save_checkpoint()
functionality.
Initialize the model from a state dictionary. Implementing this enables the load() and load_checkpoint()
functionality.
Evaluates the model. Returns a Result object containing evaluation
results and a loss value. Implement this to enable evaluation.
:param data_loader: DataLoader that iterates over dataset to be evaluated
:param out_path: Optional output path to store predictions
:param embedding_storage_mode: One of 'none', 'cpu' or 'gpu'. 'none' means all embeddings are deleted and
freshly recomputed, 'cpu' means all embeddings are stored on CPU, or 'gpu' means all embeddings are stored on GPU
:return: Returns a Tuple consisting of a Result object and a loss float value
Performs a forward pass and returns a loss tensor for backpropagation. Implement this to enable training.
This method does a forward pass through the model given a list of data points as input.
Returns the tuple (scores, labels) if return_label_candidates = False, where scores are a tensor of logits
produced by the decoder and labels are the string labels for each data point.
Returns the tuple (scores, labels, data_points, candidate_labels) if return_label_candidates = True,
where data_points are the data points to which labels are added (commonly either Sentence or Token objects)
and candidate_labels are empty Label objects for each prediction (depending on the task Label,
SpanLabel or RelationLabel).
Each model predicts labels of a certain type. TODO: can we find a better name for this?
Loads the model from the given file.
:param model: the model file
:return: the loaded text classifier model
Predicts the class labels for the given sentences. The labels are directly added to the sentences.
:param sentences: list of sentences
:param mini_batch_size: mini batch size to use
:param return_probabilities_for_all_classes : return probabilities for all classes instead of only best predicted
:param verbose: set to True to display a progress bar
:param return_loss: set to True to return loss
:param label_name: set this to change the name of the label type that is predicted
:param embedding_storage_mode: default is 'none' which is always best. Only set to 'cpu' or 'gpu' if
you wish to not only predict, but also keep the generated embeddings in CPU or GPU memory respectively.
'gpu' to store embeddings in GPU memory.
Saves the current model to the provided file.
:param model_file: the model file
in Flair <0.9.1, optimizer and scheduler used to train model are not saved write out a "model card" if one is set special handling for optimizer: remember optimizer class and state dictionary save model restore optimizer and scheduler to model card if set load_big_file is a workaround by https://github.com/highway11git to load models on some Mac/Windows setups see https://github.com/zalandoresearch/flair/issues/351 read Dataset into data loader (if list of sentences passed, make Dataset first) loss calculation variables for printing variables for computing scores remove any previously predicted labels predict for batch get the gold labels add to all_predicted_values make printout lines write all_predicted_values to out_file if set make the evaluation dictionary finally, compute numbers now, calculate evaluation numbers there is at least one gold label or one prediction (default) issue error and default all evaluation numbers to 0. line for log file check if there is a label mismatch print info initialize the label dictionary set up multi-label logic loss weights and loss function Initialize the weight tensor setter method filter empty sentences reverse sort all sequences by their length progress bar for verbosity stop if all sentences are empty remove previously predicted labels of this type if anything could possibly be predicted size: (n_sentences, n_classes) | 4,969 | en | 0.801567 |
#!/usr/bin/python
import sys
import json
import numpy as np
import cv2
import zmq
import time
from keras.models import Sequential
from keras.layers.core import Dense, Dropout
from keras.optimizers import sgd
from os import listdir
from os.path import isfile, join
#-- Constants
imageSize = (128, 128)  # width/height of each incoming raw frame (see recieveImage)
hidden_size=20000  # NOTE(review): unused in this module as shown -- presumably for the Keras model
dataset_root_dir = "./dataset"  # NOTE(review): unused in this module as shown
network_income_port = 9000  # port the zmq REP socket receives frames on
network_delivery_port = 9001  # NOTE(review): unused in this module as shown
network_protocol = "tcp"
network_masked_ip = '127.0.0'#"192.168.14"  # first three octets; last octet ('1') is appended at connect time
#-- Functions
def recieveImage(listener):
    """Receive one raw 128x128 grayscale frame from the given zmq REP socket.

    :param listener: socket-like object whose recv() returns the 16384 raw
        bytes of a 128x128 uint8 image.
    :return: writable numpy uint8 array of shape (128, 128).
    """
    raw = listener.recv()
    # np.frombuffer reads the received bytes directly; the Python-2-only
    # buffer() wrapper and the intermediate list() round-trip of the old
    # implementation were unnecessary. The trailing .copy() makes the array
    # writable (np.frombuffer returns a read-only view), matching the
    # original behavior.
    frame = np.frombuffer(raw, dtype='uint8')
    return frame.reshape(128, 128).copy()
#-- Main Function
if __name__ == "__main__":
    # NOTE(review): Brain is never defined or imported in this module -- this
    # line raises NameError at runtime; presumably a missing import.
    brain = Brain()
    if (len(sys.argv) > 1 and sys.argv[1] == 'train'):
        # Train mode: load the dataset, train the model, save weights.
        print "running in train mode"
        if (len(sys.argv) > 2):
            filename = sys.argv[2]
        else:
            filename = "model"
        print "model : ", filename
        brain.loadData()
        brain.train()
        # NOTE(review): saves under an empty name although `filename` was just
        # resolved above -- likely should be brain.save(filename).
        brain.save('')
    elif len(sys.argv) > 1 and sys.argv[1] == 'help':
        print "Usage: " + sys.argv[0] + " [train | test | socket | collect] [model_name]\n"
    elif len(sys.argv) > 1 and sys.argv[1] == 'collect':
        # Collect mode: receive frames over zmq, dump each to ./data as PNG,
        # and reply with an alternating "0"/"1" string.
        print "runing in collection mode"
        ctx = zmq.Context.instance()
        listener = ctx.socket(zmq.REP)
        # connect() (not bind()): the frame producer is expected to bind.
        listener.connect("{0}://{1}.{2}:{3}".format(network_protocol, network_masked_ip, '1', network_income_port))
        #if (len(sys.argv) > 2):
        #    filename = sys.argv[2]
        #else:
        #    filename = "model"
        #brain.load(filename)
        print "socket ready"
        #listener.setsockopt(zmq.SUBSCRIBE, b'')
        r = 0
        while True:
            # Recieveing Data
            rc = recieveImage(listener);
            rc = cv2.resize(rc, (300, 300), interpolation=0)
            # Save recieved data (timestamped filename)
            p = './data/frame_' + str(time.time()) + ".png"
            cv2.imwrite(p, rc)
            # Send responce
            p = str(r)
            listener.send_string(p)
            cv2.imshow("img", rc)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            # Toggle the 0/1 reply each frame.
            r = 1 - r
        listener.close(linger=0)
        ctx.term()
    elif len(sys.argv) > 1 and sys.argv[1] == 'socket':
        # Socket mode: like collect mode but without saving frames to disk.
        print "runing in socket mode"
        ctx = zmq.Context.instance()
        listener = ctx.socket(zmq.REP)
        listener.connect("{0}://{1}.{2}:{3}".format(network_protocol, network_masked_ip, '1', network_income_port))
        #if (len(sys.argv) > 2):
        #    filename = sys.argv[2]
        #else:
        #    filename = "model"
        #brain.load(filename)
        print "socket ready"
        #listener.setsockopt(zmq.SUBSCRIBE, b'')
        r = 0
        while True:
            rc = recieveImage(listener);
            rc = cv2.resize(rc, (300, 300), interpolation=0)
            cv2.imshow("img", rc)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            # NOTE(review): zmq send() expects bytes/str but r is an int here,
            # unlike collect mode which uses send_string(str(r)) -- confirm.
            listener.send(r)
            r = 1 - r
            #print brain.predict()
        listener.close(linger=0)
        ctx.term()
    else:
        # Default mode: load a saved model and run a single prediction.
        print "runing in default mode"
        if (len(sys.argv) > 1):
            filename = sys.argv[1]
        else:
            filename = "model"
        print "model : ", filename
        brain.load(filename)
        print brain.predict()
| src/modules/bd.py | 2,968 | !/usr/bin/python-- Constants"192.168.14"-- Functions-- Main Functionif (len(sys.argv) > 2): filename = sys.argv[2]else: filename = "model"brain.load(filename)listener.setsockopt(zmq.SUBSCRIBE, b'') Recieveing Data Save recieved data Send responceif (len(sys.argv) > 2): filename = sys.argv[2]else: filename = "model"brain.load(filename)listener.setsockopt(zmq.SUBSCRIBE, b'')print brain.predict() | 396 | en | 0.247587 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Database(pulumi.CustomResource):
    """A database inside a Google Cloud SQL instance
    (Pulumi resource type ``gcp:sql/database:Database``).

    NOTE: this class is machine-generated by the Pulumi Terraform Bridge
    (tfgen); see the warning at the top of the file before editing by hand.
    """
    # Resolved output properties, populated by the Pulumi engine.
    charset: pulumi.Output[str]
    collation: pulumi.Output[str]
    instance: pulumi.Output[str]
    name: pulumi.Output[str]
    project: pulumi.Output[str]
    """
    The ID of the project in which the resource belongs.
    If it is not provided, the provider project is used.
    """
    self_link: pulumi.Output[str]
    """
    The URI of the created resource.
    """
    def __init__(__self__, resource_name, opts=None, charset=None, collation=None, instance=None, name=None, project=None, __props__=None, __name__=None, __opts__=None):
        """
        Create a Database resource with the given unique name, props, and options.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] instance: The Cloud SQL instance the database
               belongs to (required; a missing value raises TypeError).
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.
        """
        # Backwards-compatibility shims for the deprecated __name__ / __opts__
        # keyword arguments.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        if opts.id is None:
            # Creating a new resource: assemble the input property map.
            # (With an opts.id set, the engine looks up an existing resource
            # instead, and __props__ must not be supplied by the caller.)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            __props__['charset'] = charset
            __props__['collation'] = collation
            if instance is None:
                raise TypeError("Missing required property 'instance'")
            __props__['instance'] = instance
            __props__['name'] = name
            __props__['project'] = project
            # Output-only property; filled in by the provider after creation.
            __props__['self_link'] = None
        super(Database, __self__).__init__(
            'gcp:sql/database:Database',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name, id, opts=None, charset=None, collation=None, instance=None, name=None, project=None, self_link=None):
        """
        Get an existing Database resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param str id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] project: The ID of the project in which the resource belongs.
               If it is not provided, the provider project is used.
        :param pulumi.Input[str] self_link: The URI of the created resource.

        > This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.
        """
        # Forcing opts.id makes __init__ take the "existing resource" path
        # above rather than creating a new database.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = dict()
        __props__["charset"] = charset
        __props__["collation"] = collation
        __props__["instance"] = instance
        __props__["name"] = name
        __props__["project"] = project
        __props__["self_link"] = self_link
        return Database(resource_name, opts=opts, __props__=__props__)

    def translate_output_property(self, prop):
        """Engine hook: map a camelCase provider property name to snake_case."""
        return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        """Engine hook: map a snake_case property name to the provider's camelCase."""
        return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| sdk/python/pulumi_gcp/sql/database.py | 4,427 | Create a Database resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.
Get an existing Database resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
:param pulumi.Input[str] self_link: The URI of the created resource.
> This content is derived from https://github.com/terraform-providers/terraform-provider-google/blob/master/website/docs/r/sql_database.html.markdown.
coding=utf-8 *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 1,358 | en | 0.765368 |
"""
Django settings for django_app project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Read the key from the environment rather than hard-coding it. The previous
# value was an empty literal, which Django rejects at startup
# (ImproperlyConfigured: "The SECRET_KEY setting must not be empty").
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', '')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# With DEBUG enabled and ALLOWED_HOSTS empty, Django falls back to accepting
# localhost requests. The previous value [""] contained an empty host pattern,
# which matches no Host header at all and therefore rejected every request.
ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django_app']

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'django_app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'django_app.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'
# Absolute filesystem path that `manage.py collectstatic` collects into.
STATIC_ROOT = "/var/www/django_app/django_app/static/"
| django_app/settings.py | 3,095 | Django settings for django_app project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
Build paths inside the project like this: BASE_DIR / 'subdir'. Quick-start development settings - unsuitable for production See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ SECURITY WARNING: keep the secret key used in production secret! SECURITY WARNING: don't run with debug turned on in production! Application definition Database https://docs.djangoproject.com/en/3.1/ref/settings/databases Password validation https://docs.djangoproject.com/en/3.1/ref/settings/auth-password-validators Internationalization https://docs.djangoproject.com/en/3.1/topics/i18n/ Static files (CSS, JavaScript, Images) https://docs.djangoproject.com/en/3.1/howto/static-files/ | 984 | en | 0.691599 |
# -*- coding: utf-8 -*-
# Token associated with "set"-style shell suggestions; presumably the
# separator that triggers value completion after an option name -- TODO confirm
# against the consumers of this constant.
SET_SUGGESTIONS = '='
| core/shell/shellSuggestion.py | 47 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
# Copyright 2017 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setuptools-based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# The short and long descriptions are intentionally the same one-liner.
long_description = "Google Genomics Protos for Python."

# Trove classifiers published to PyPI
# (https://pypi.python.org/pypi?%3Aaction=list_classifiers).
_CLASSIFIERS = [
    'Development Status :: 3 - Alpha',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: Apache Software License',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
]

# Shared with other google.* distributions so their packages can be combined
# into a single namespace.
_NAMESPACE_PACKAGES = [
    'google', 'google.genomics',
    'google.genomics.v1'
]

setup(
    # Versioning follows PEP 440; see
    # https://packaging.python.org/en/latest/single_source_version.html
    name='genomics_protos',
    version='0.1.0',
    description=long_description,
    long_description=long_description,
    url='',
    author='Thomas Colthurst, Jean-Philippe Martin',
    author_email='thomaswc@google.com, jpmartin@google.com',
    license='Apache Software License',
    classifiers=_CLASSIFIERS,
    keywords='Genomics protos',
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Run-time dependencies installed by pip; see
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=["googleapis-common-protos"],
    # Optional dependency groups: pip install -e .[dev,test]
    extras_require={
        'dev': [],
        'test': [],
    },
    package_data={
        'sample': [],
    },
    # For data files outside the packages; see
    # http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files
    data_files=[],
    namespace_packages=_NAMESPACE_PACKAGES,
)
| setup.py | 4,347 | Setuptools-based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
Copyright 2017 Google. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Always prefer setuptools over distutils To use a consistent encoding Versions should comply with PEP440. For a discussion on single-sourcing the version across setup.py and the project code, see https://packaging.python.org/en/latest/single_source_version.html The project's main homepage. Author details Choose your license See https://pypi.python.org/pypi?%3Aaction=list_classifiers How mature is this project? Common values are 3 - Alpha 4 - Beta 5 - Production/Stable Indicate who your project is intended for Pick your license as you wish (should match "license" above) Specify the Python versions you support here. In particular, ensure that you indicate whether you support Python 2, Python 3 or both. What does your project relate to? You can just specify the packages manually here if your project is simple. Or you can use find_packages(). Alternatively, if you want to distribute just a my_module.py, uncomment this: py_modules=["my_module"], List run-time dependencies here. These will be installed by pip when your project is installed. For an analysis of "install_requires" vs pip's requirements files see: https://packaging.python.org/en/latest/requirements.html List additional groups of dependencies here (e.g. development dependencies). You can install these using the following syntax, for example: $ pip install -e .[dev,test] If there are data files included in your packages that need to be installed, specify them here. 
If using Python 2.6 or less, then these have to be included in MANIFEST.in as well. Although 'package_data' is the preferred approach, in some case you may need to place data files outside of your packages. See: http://docs.python.org/3.4/distutils/setupscript.htmlinstalling-additional-files noqa In this case, 'data_file' will be installed into '<sys.prefix>/my_data' to be able to combine with other google packages To provide executable scripts, use entry points in preference to the "scripts" keyword. Entry points provide cross-platform support and allow pip to create the appropriate form of executable for the target platform. entry_points={ 'console_scripts': [ 'sample=sample:main', ], }, | 2,849 | en | 0.814266 |
# This sample verifies that the exception type validation
# handles the case where the exception type is a Type[X] object.
from typing import Type
exc: Type[Exception] = Exception  # the except clause below uses this Type[X] variable, not a literal class
try:
    1 / 0  # deliberately raises ZeroDivisionError so the handler is exercised
except exc:  # exception "type" given as an expression of type Type[Exception]
    print("exc")
| packages/pyright-internal/src/tests/samples/tryExcept3.py | 240 | This sample verifies that the exception type validation handles the case where the exception type is a Type[X] object. | 118 | en | 0.783309 |
import argparse
import copy
import os
import pickle
import random
import sys
from types import SimpleNamespace
import numpy as np
from domains.npuzzle import NPuzzle, macros
from experiments import search, iw, bfws
def parse_args():
    """Parse the command-line arguments for the N-Puzzle solver.

    Run with --help for a full description of every option.
    """
    # Jupyter kernels inject their own CLI flags; strip everything but the
    # program name so argparse only sees our options.
    if 'ipykernel' in sys.argv[0]:
        sys.argv = [sys.argv[0]]

    ap = argparse.ArgumentParser()
    ap.add_argument('-n', type=int, default=15,
                    choices=[8, 15, 24, 35, 48, 63, 80],
                    help='Number of tiles')
    ap.add_argument('--random_seed', '-s', type=int, default=1,
                    help='Seed to use for RNGs')
    ap.add_argument('--macro_type', '-m', type=str, default='primitive',
                    choices=['primitive', 'random', 'learned'],
                    help='Type of macro_list to consider during search')
    ap.add_argument('--search_alg', type=str, default='gbfs',
                    choices=['astar', 'gbfs', 'weighted_astar', 'bfws_r0', 'bfws_rg'],
                    help='Search algorithm to run')
    ap.add_argument('--g_weight', type=float, default=None,
                    help='Weight for g-score in weighted A*')
    ap.add_argument('--h_weight', type=float, default=None,
                    help='Weight for h-score in weighted A*')
    ap.add_argument('--random_goal', '-r', action='store_true', default=False,
                    help='Generate a random goal instead of the default solve configuration')
    # Accepts scientific notation like 5e5 on the command line; note argparse
    # does not apply `type` to the default, which stays the float 5e5.
    ap.add_argument('--max_transitions', type=lambda x: int(float(x)), default=5e5,
                    help='Maximum number of state transitions')
    ap.add_argument('--bfws_precision', type=int, default=3,
                    help='The number of width values, w \in {1,...,P}, to use when the search algorithm is best-first width search')
    return ap.parse_args()
def solve():
    """Instantiate an N-Puzzle and solve with the specified macro-actions and search algorithm"""
    args = parse_args()

    #
    # Set up the scramble
    #
    # Seed both RNGs so the scramble (and any stochastic behavior downstream)
    # is reproducible for a given --random_seed.
    random.seed(args.random_seed)
    np.random.seed(args.random_seed)

    start = NPuzzle(n=args.n).scramble(seed=args.random_seed)

    if args.random_goal:
        # Offset the seed so the goal scramble differs from the start scramble.
        goal = NPuzzle(n=args.n).scramble(seed=args.random_seed+1000)
        print('Using goal pattern: {:03d}'.format(args.random_seed+1000))
    else:
        goal = NPuzzle(n=args.n)

    print('Using seed: {:03d}'.format(args.random_seed))
    print('Start:', start)
    print('Goal:', goal)
    print('Start:', ' '.join(map(str,list(start))))
    print('Goal: ', ' '.join(map(str,list(goal))))

    # Define the macros / models. 'random' macros must be (re)generated for
    # this seed before the namespace is read.
    if args.macro_type == 'random':
        macros.generate_random_macro_set(args.random_seed)

    macro_namespace = {
        'primitive': SimpleNamespace(macros=[], models=[]),
        'random': macros.random,
        'learned': macros.learned,
    }[args.macro_type]
    macro_list = macro_namespace.macros
    model_list = macro_namespace.models

    # Set up the search problem: dispatch table from CLI name to search function.
    search_fn = {
        'astar': search.astar,
        'gbfs': search.gbfs,
        'weighted_astar': search.weighted_astar,
        'bfws_r0': bfws.bfws,
        'bfws_rg': bfws.bfws,
    }[args.search_alg]

    def get_successors(puz):
        # Primitive moves first; macro successors are appended when a
        # non-primitive macro type is selected. Deep copies keep the search
        # from mutating shared puzzle states.
        successors = [(copy.deepcopy(puz).transition(a), [a]) for a in puz.actions()]
        if args.macro_type != 'primitive':
            # Macros and their models are indexed by the blank tile's position.
            valid_macros = macro_list[puz.blank_idx]
            valid_models = model_list[puz.blank_idx]
            macro_successors = [(copy.deepcopy(puz).apply_macro(model=model), macro)
                                for (macro, model) in zip(valid_macros, valid_models)]
            successors += macro_successors
        return successors

    search_dict = {
        'start': start,
        'is_goal': lambda node: node.state == goal,
        'step_cost': lambda macro: 1,
        # Heuristic: size of the first element returned by summarize_effects
        # against the goal (presumably the set of mismatched tiles -- confirm
        # against domains.npuzzle).
        'heuristic': lambda puz: len(puz.summarize_effects(baseline=goal)[0]),
        'get_successors': get_successors,
        'max_transitions': args.max_transitions,
    }

    if args.search_alg == 'weighted_astar':
        assert (args.g_weight is not None
                and args.h_weight is not None), 'Must specify weights if using weighted A*.'
        gh_weights = (args.g_weight, args.h_weight)
        search_dict['gh_weights'] = gh_weights

    if 'bfws' in args.search_alg:
        search_dict['precision'] = args.bfws_precision
    if args.search_alg == 'bfws_rg':
        # BFWS(R_G): compute goal-relevant atoms with IW(1), widening to IW(2)
        # and finally to all atoms if the narrower searches find nothing.
        # (The i=i default argument pins each index, avoiding the
        # late-binding-closure pitfall.)
        goal_fns = [(lambda x, i=i: x.state[i] == goal[i]) for i, _ in enumerate(goal)]
        relevant_atoms = iw.iw(1, start, get_successors, goal_fns)
        if not relevant_atoms:
            relevant_atoms = iw.iw(2, start, get_successors, goal_fns)
        if not relevant_atoms:
            relevant_atoms = start.all_atoms()
        search_dict['R'] = relevant_atoms

    #%% Run the search
    search_results = search_fn(**search_dict)

    #%% Save the results, pickled under results/npuzzle/<alg>/<n>-puzzle/<goal>/<macro_type>/
    tag = '{}-puzzle/'.format(args.n)
    if args.random_goal:
        tag += 'random_goal/'
    else:
        tag += 'default_goal/'
    tag += args.macro_type

    results_dir = 'results/npuzzle/{}/{}/'.format(args.search_alg,tag)
    os.makedirs(results_dir, exist_ok=True)
    with open(results_dir+'seed-{:03d}.pickle'.format(args.random_seed), 'wb') as file:
        pickle.dump(search_results, file)
# Script entry point: run the search when invoked directly.
if __name__ == '__main__':
    solve()
| experiments/npuzzle/solve.py | 5,462 | Parse input arguments
Use --help to see a pretty description of the arguments
Instantiate an N-Puzzle and solve with the specified macro-actions and search algorithm
Set up the scramble Define the macros / models Set up the search problem%% Run the search%% Save the results | 277 | en | 0.620819 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import datetime
from frappe.utils import formatdate, fmt_money, flt, cstr, cint, format_datetime, format_time, format_duration
from frappe.model.meta import get_field_currency, get_field_precision
import re
from six import string_types
def format_value(value, df=None, doc=None, currency=None, translated=False):
	'''Format value based on given fieldtype, document reference, currency reference.

	If docfield info (df) is not given, it will try and guess based on the datatype of the value.

	:param value: raw value to format (None is rendered as "")
	:param df: docfield object, fieldtype string, or dict describing the field
	:param doc: document the value belongs to (used for currency / precision lookups)
	:param currency: explicit currency code; overrides the field's currency
	:param translated: if True, pass the value through frappe._ before formatting
	'''
	if isinstance(df, string_types):
		df = frappe._dict(fieldtype=df)

	if not df:
		# No docfield given: guess a fieldtype from the Python type of the value.
		df = frappe._dict()
		if isinstance(value, datetime.datetime):
			df.fieldtype = 'Datetime'
		elif isinstance(value, datetime.date):
			df.fieldtype = 'Date'
		elif isinstance(value, datetime.timedelta):
			df.fieldtype = 'Time'
		elif isinstance(value, int):
			df.fieldtype = 'Int'
		elif isinstance(value, float):
			df.fieldtype = 'Float'
		else:
			df.fieldtype = 'Data'

	elif (isinstance(df, dict)):
		# Convert dict to object if necessary
		df = frappe._dict(df)

	if value is None:
		value = ""
	elif translated:
		value = frappe._(value)

	if not df:
		return value

	elif df.get("fieldtype")=="Date":
		return formatdate(value)

	elif df.get("fieldtype")=="Datetime":
		return format_datetime(value)

	elif df.get("fieldtype")=="Time":
		return format_time(value)

	elif value==0 and df.get("fieldtype") in ("Int", "Float", "Currency", "Percent") and df.get("print_hide_if_no_value"):
		# this is required to show 0 as blank in table columns
		return ""

	elif df.get("fieldtype") == "Currency":
		default_currency = frappe.db.get_default("currency")
		currency = currency or get_field_currency(df, doc) or default_currency
		return fmt_money(value, precision=get_field_precision(df, doc), currency=currency)

	elif df.get("fieldtype") == "Float":
		precision = get_field_precision(df, doc)
		# I don't know why we support currency option for float
		currency = currency or get_field_currency(df, doc)

		# show 1.000000 as 1
		# options should not specified
		if not df.options and value is not None:
			temp = cstr(value).split(".")
			if len(temp)==1 or cint(temp[1])==0:
				precision = 0

		return fmt_money(value, precision=precision, currency=currency)

	elif df.get("fieldtype") == "Percent":
		return "{}%".format(flt(value, 2))

	elif df.get("fieldtype") in ("Text", "Small Text"):
		# Plain text only: if the value already contains block-level HTML,
		# fall through and return it unchanged.
		if not re.search(r"(<br|<div|<p)", value):
			return frappe.safe_decode(value).replace("\n", "<br>")

	elif df.get("fieldtype") == "Markdown Editor":
		return frappe.utils.markdown(value)

	elif df.get("fieldtype") == "Table MultiSelect":
		meta = frappe.get_meta(df.options)
		# Use a distinct loop variable (not `df`): under Python 2 -- which this
		# module still supports via six -- a list comprehension leaks its
		# variable and would clobber the `df` argument.
		link_field = [f for f in meta.fields if f.fieldtype == 'Link'][0]
		# Default missing link values to "" (was a stray 'asdf' debug placeholder).
		values = [v.get(link_field.fieldname, '') for v in value]
		return ', '.join(values)

	elif df.get("fieldtype") == "Duration":
		hide_days = df.hide_days
		return format_duration(value, hide_days)

	elif df.get("fieldtype") == "Text Editor":
		return "<div class='ql-snow'>{}</div>".format(value)

	return value
| frappe/utils/formatters.py | 3,202 | Format value based on given fieldtype, document reference, currency reference.
If docfield info (df) is not given, it will try and guess based on the datatype of the value
Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors MIT License. See license.txt Convert dict to object if necessary this is required to show 0 as blank in table columns I don't know why we support currency option for float show 1.000000 as 1 options should not specified | 460 | en | 0.725689 |
import FWCore.ParameterSet.Config as cms
from RecoEgamma.PhotonIdentification.isolationCalculator_cfi import *
from RecoEgamma.PhotonIdentification.mipVariable_cfi import *
from RecoEcal.EgammaClusterProducers.hybridSuperClusters_cfi import *
from RecoEcal.EgammaClusterProducers.multi5x5BasicClusters_cfi import *
#
# producer for photons
#
photons = cms.EDProducer("GEDPhotonProducer",
photonProducer = cms.InputTag("photonCore"),
reconstructionStep = cms.string("tmp"),
outputPhotonCollection = cms.string(""),
pfEgammaCandidates = cms.InputTag(""),
valueMapPhotons = cms.string(""),
# photonCollection = cms.string(''),
regressionWeightsFromDB = cms.bool(True),
energyRegressionWeightsFileLocation = cms.string('/afs/cern.ch/user/b/bendavid/cmspublic/regweights/gbrph.root'),
energyRegressionWeightsDBLocation = cms.string('wgbrph'),
superClusterEnergyCorrFunction = cms.string("EcalClusterEnergyCorrection"),
superClusterEnergyErrorFunction = cms.string("EcalClusterEnergyUncertainty"),
superClusterCrackEnergyCorrFunction = cms.string("EcalClusterCrackCorrection"),
photonEcalEnergyCorrFunction = cms.string("EcalClusterEnergyCorrectionObjectSpecific"),
#candidateP4type = cms.string("fromRegression"),
candidateP4type = cms.string("fromEcalEnergy"),
isolationSumsCalculatorSet = cms.PSet(isolationSumsCalculator),
mipVariableSet = cms.PSet(mipVariable),
usePrimaryVertex = cms.bool(True),
primaryVertexProducer = cms.InputTag('offlinePrimaryVerticesWithBS'),
posCalc_t0_endcPresh = cms.double(3.6),
posCalc_logweight = cms.bool(True),
posCalc_w0 = cms.double(4.2),
hbheInstance = cms.string(''),
posCalc_t0_endc = cms.double(6.3),
barrelEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEB"),
hbheModule = cms.string('hbhereco'),
endcapEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEE"),
preshowerHits = cms.InputTag("ecalPreshowerRecHit","EcalRecHitsES"),
hcalTowers = cms.InputTag("towerMaker"),
runMIPTagger = cms.bool(True),
highEt = cms.double(100.),
minR9Barrel = cms.double(0.94),
minR9Endcap = cms.double(0.95),
hOverEConeSize = cms.double(0.15),
posCalc_x0 = cms.double(0.89),
posCalc_t0_barl = cms.double(7.7),
minSCEtBarrel = cms.double(10.0),
minSCEtEndcap = cms.double(10.0),
maxHoverEBarrel = cms.double(0.5),
maxHoverEEndcap = cms.double(0.5),
ecalRecHitSumEtOffsetBarrel = cms.double(999999999),
ecalRecHitSumEtSlopeBarrel = cms.double(0.),
ecalRecHitSumEtOffsetEndcap = cms.double(999999999),
ecalRecHitSumEtSlopeEndcap = cms.double(0.),
hcalTowerSumEtOffsetBarrel = cms.double(999999999),
hcalTowerSumEtSlopeBarrel = cms.double(0.),
hcalTowerSumEtOffsetEndcap = cms.double(999999999),
hcalTowerSumEtSlopeEndcap = cms.double(0.),
nTrackSolidConeBarrel =cms.double(999999999),
nTrackSolidConeEndcap =cms.double(999999999),
nTrackHollowConeBarrel =cms.double(999999999),
nTrackHollowConeEndcap =cms.double(999999999),
trackPtSumSolidConeBarrel =cms.double(999999999),
trackPtSumSolidConeEndcap =cms.double(999999999),
trackPtSumHollowConeBarrel =cms.double(999999999),
trackPtSumHollowConeEndcap =cms.double(999999999),
sigmaIetaIetaCutBarrel=cms.double(999999999),
sigmaIetaIetaCutEndcap=cms.double(999999999),
posCalcParameters = cms.PSet( T0_barl = cms.double(7.4),
T0_endc = cms.double(6.3),
T0_endcPresh = cms.double(3.6),
LogWeighted = cms.bool(True),
W0 = cms.double(4.2),
X0 = cms.double(0.89)
),
RecHitFlagToBeExcludedEB = cleanedHybridSuperClusters.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEB = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded,
RecHitFlagToBeExcludedEE = multi5x5BasicClustersCleaned.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEE = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded,
checkHcalStatus = cms.bool(True),
)
photonsFromMultiCl = photons.clone(
photonProducer = 'photonCoreFromMultiCl'
)
islandPhotons = cms.EDProducer("PhotonProducer",
photonCoreProducer = cms.InputTag("islandPhotonCore"),
regressionWeightsFromDB = cms.bool(True),
energyRegressionWeightsFileLocation = cms.string('/afs/cern.ch/user/b/bendavid/cmspublic/regweights/gbrph.root'),
energyRegressionWeightsDBLocation = cms.string('wgbrph'),
superClusterEnergyCorrFunction = cms.string("EcalClusterEnergyCorrection"),
superClusterEnergyErrorFunction = cms.string("EcalClusterEnergyUncertainty"),
superClusterCrackEnergyCorrFunction = cms.string("EcalClusterCrackCorrection"),
photonEcalEnergyCorrFunction = cms.string("EcalClusterEnergyCorrectionObjectSpecific"),
candidateP4type = cms.string("fromEcalEnergy"),
isolationSumsCalculatorSet = cms.PSet(isolationSumsCalculator),
mipVariableSet = cms.PSet(mipVariable),
usePrimaryVertex = cms.bool(True),
primaryVertexProducer = cms.InputTag('offlinePrimaryVerticesWithBS'),
posCalc_t0_endcPresh = cms.double(3.6),
posCalc_logweight = cms.bool(True),
posCalc_w0 = cms.double(4.2),
hbheInstance = cms.string(''),
posCalc_t0_endc = cms.double(6.3),
barrelEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEB"),
hbheModule = cms.string('hbhereco'),
endcapEcalHits = cms.InputTag("ecalRecHit","EcalRecHitsEE"),
hcalTowers = cms.InputTag("towerMaker"),
runMIPTagger = cms.bool(True),
highEt = cms.double(100.),
minR9Barrel = cms.double(10.0),
minR9Endcap = cms.double(10.0),
hOverEConeSize = cms.double(0.15),
posCalc_x0 = cms.double(0.89),
posCalc_t0_barl = cms.double(7.7),
minSCEtBarrel = cms.double(5.0),
minSCEtEndcap = cms.double(15.0),
maxHoverEBarrel = cms.double(0.99),
maxHoverEEndcap = cms.double(0.5),
ecalRecHitSumEtOffsetBarrel = cms.double(999999999),
ecalRecHitSumEtSlopeBarrel = cms.double(0.),
ecalRecHitSumEtOffsetEndcap = cms.double(999999999),
ecalRecHitSumEtSlopeEndcap = cms.double(0.),
hcalTowerSumEtOffsetBarrel = cms.double(999999999),
hcalTowerSumEtSlopeBarrel = cms.double(0.),
hcalTowerSumEtOffsetEndcap = cms.double(999999999),
hcalTowerSumEtSlopeEndcap = cms.double(0.),
nTrackSolidConeBarrel =cms.double(999999999),
nTrackSolidConeEndcap =cms.double(999999999),
nTrackHollowConeBarrel =cms.double(999999999),
nTrackHollowConeEndcap =cms.double(999999999),
trackPtSumSolidConeBarrel =cms.double(999999999),
trackPtSumSolidConeEndcap =cms.double(999999999),
trackPtSumHollowConeBarrel =cms.double(999999999),
trackPtSumHollowConeEndcap =cms.double(999999999),
sigmaIetaIetaCutBarrel=cms.double(999999999),
sigmaIetaIetaCutEndcap=cms.double(999999999),
posCalcParameters = cms.PSet( T0_barl = cms.double(7.4),
T0_endc = cms.double(6.3),
T0_endcPresh = cms.double(3.6),
LogWeighted = cms.bool(True),
W0 = cms.double(4.2),
X0 = cms.double(0.89)
),
RecHitFlagToBeExcludedEB = cleanedHybridSuperClusters.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEB = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded,
RecHitFlagToBeExcludedEE = multi5x5BasicClustersCleaned.RecHitFlagToBeExcluded,
RecHitSeverityToBeExcludedEE = cleanedHybridSuperClusters.RecHitSeverityToBeExcluded,
)
| RecoEgamma/EgammaPhotonProducers/python/photons_cfi.py | 7,990 | producer for photons photonCollection = cms.string(''),candidateP4type = cms.string("fromRegression"), | 104 | en | 0.560688 |
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.storage import StorageEngine, MySQLStoreType
from fate_arch.storage import StorageTableBase
class StorageTable(StorageTableBase):
def __init__(
self,
cur,
con,
address=None,
name: str = None,
namespace: str = None,
partitions: int = 1,
store_type: MySQLStoreType = MySQLStoreType.InnoDB,
options=None,
):
super(StorageTable, self).__init__(
name=name,
namespace=namespace,
address=address,
partitions=partitions,
options=options,
engine=StorageEngine.MYSQL,
store_type=store_type,
)
self._cur = cur
self._con = con
def check_address(self):
schema = self.meta.get_schema()
if schema:
sql = "SELECT {},{} FROM {}".format(
schema.get("sid"), schema.get("header"), self._address.name
)
feature_data = self.execute(sql)
for feature in feature_data:
if feature:
break
return True
@staticmethod
def get_meta_header(feature_name_list):
create_features = ""
feature_list = []
feature_size = "varchar(255)"
for feature_name in feature_name_list:
create_features += "{} {},".format(feature_name, feature_size)
feature_list.append(feature_name)
return create_features, feature_list
def _count(self):
sql = "select count(*) from {}".format(self._address.name)
try:
self._cur.execute(sql)
# self.con.commit()
ret = self._cur.fetchall()
count = ret[0][0]
except:
count = 0
return count
def _collect(self, **kwargs) -> list:
id_name, feature_name_list, _ = self._get_id_feature_name()
id_feature_name = [id_name]
id_feature_name.extend(feature_name_list)
sql = "select {} from {}".format(",".join(id_feature_name), self._address.name)
data = self.execute(sql)
for line in data:
feature_list = [str(feature) for feature in list(line[1:])]
yield line[0], self.meta.get_id_delimiter().join(feature_list)
def _put_all(self, kv_list, **kwargs):
id_name, feature_name_list, id_delimiter = self._get_id_feature_name()
feature_sql, feature_list = StorageTable.get_meta_header(feature_name_list)
id_size = "varchar(100)"
create_table = (
"create table if not exists {}({} {} NOT NULL, {} PRIMARY KEY({}))".format(
self._address.name, id_name, id_size, feature_sql, id_name
)
)
self._cur.execute(create_table)
sql = "REPLACE INTO {}({}, {}) VALUES".format(
self._address.name, id_name, ",".join(feature_list)
)
for kv in kv_list:
sql += '("{}", "{}"),'.format(kv[0], '", "'.join(kv[1].split(id_delimiter)))
sql = ",".join(sql.split(",")[:-1]) + ";"
self._cur.execute(sql)
self._con.commit()
def _destroy(self):
sql = "drop table {}".format(self._address.name)
self._cur.execute(sql)
self._con.commit()
def _save_as(self, address, name, namespace, partitions=None, **kwargs):
sql = "create table {}.{} select * from {};".format(namespace, name, self._address.name)
self._cur.execute(sql)
self._con.commit()
def execute(self, sql, select=True):
self._cur.execute(sql)
if select:
while True:
result = self._cur.fetchone()
if result:
yield result
else:
break
else:
result = self._cur.fetchall()
return result
def _get_id_feature_name(self):
id = self.meta.get_schema().get("sid", "id")
header = self.meta.get_schema().get("header")
id_delimiter = self.meta.get_id_delimiter()
if header:
if isinstance(header, str):
feature_list = header.split(id_delimiter)
elif isinstance(header, list):
feature_list = header
else:
feature_list = [header]
else:
raise Exception("mysql table need data header")
return id, feature_list, id_delimiter
| python/fate_arch/storage/mysql/_table.py | 5,034 | Copyright 2019 The FATE Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. self.con.commit() | 602 | en | 0.860858 |
import requests
from . import FeedSource, _request_headers
# pylint: disable=no-member
class WorldCoinIndex(FeedSource): # Weighted average from WorldCoinIndex
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timeout = getattr(self, 'timeout', 15)
if not hasattr(self, 'api_key'):
raise Exception("WorldCoinIndex FeedSource requires 'api_key'.")
def _fetch(self):
feed = {}
for base in self.bases:
url = "https://www.worldcoinindex.com/apiservice/v2getmarkets?key={apikey}&fiat={base}"
response = requests.get(url=url.format(apikey=self.api_key, base=base),
headers=_request_headers, timeout=self.timeout)
result = response.json()['Markets']
for market in result:
for ticker in market:
(quote, returnedBase) = ticker['Label'].split('/')
if base == returnedBase and quote in self.quotes:
self.add_rate(feed, base, quote, ticker['Price'], ticker['Volume_24h'] / ticker['Price'])
return feed
| bitshares_pricefeed/sources/worldcoinindex.py | 1,152 | pylint: disable=no-member Weighted average from WorldCoinIndex | 62 | en | 0.761682 |
# Copyright 2016, VIXL authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ARM Limited nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import util
def FilterKnownValgrindTestFailures(tests):
rc, output = util.getstatusoutput('valgrind --version')
if rc != 0:
util.abort('Failed to get the Valgrind version.')
version = re.search('^valgrind-([0-9]+)\.([0-9]+)\.([0-9]+)', output)
if not version:
util.abort('Failed to get the Valgrind version.')
major = int(version.group(1))
minor = int(version.group(2))
if major > 3 or (major == 3 and minor > 10):
return tests
reason = "Valgrind versions before 3.11 have issues with fused multiply-add, " \
"so disable the affected tests."
known_valgrind_test_failures = {
'AARCH64_SIM_fmadd_d',
'AARCH64_SIM_fmadd_s',
'AARCH64_SIM_fmla_2D',
'AARCH64_SIM_fmla_2D_2D_D',
'AARCH64_SIM_fmla_2S',
'AARCH64_SIM_fmla_2S_2S_S',
'AARCH64_SIM_fmla_4S',
'AARCH64_SIM_fmla_4S_4S_S',
'AARCH64_SIM_fmla_D_D_D',
'AARCH64_SIM_fmls_2D',
'AARCH64_SIM_fmls_2D_2D_D',
'AARCH64_SIM_fmls_2S',
'AARCH64_SIM_fmls_2S_2S_S',
'AARCH64_SIM_fmls_4S',
'AARCH64_SIM_fmls_4S_4S_S',
'AARCH64_SIM_fmls_D_D_D',
'AARCH64_SIM_fmsub_d',
'AARCH64_SIM_fmsub_s',
'AARCH64_SIM_fnmadd_d',
'AARCH64_SIM_fnmadd_s',
'AARCH64_SIM_fnmsub_d',
'AARCH64_SIM_fnmsub_s',
'AARCH64_SIM_frecps_2D',
'AARCH64_SIM_frecps_D',
'AARCH64_SIM_frsqrts_2D',
'AARCH64_SIM_frsqrts_D'
}
filtered_list = [x for x in tests if x not in known_valgrind_test_failures]
return (filtered_list, len(tests) - len(filtered_list), reason)
def FilterKnownTestFailures(tests, **env):
skipped = []
if env.get('under_valgrind'):
tests, n_tests_skipped, reason = FilterKnownValgrindTestFailures(tests)
skipped.append( (n_tests_skipped, reason) )
return (tests, skipped)
| tools/known_test_failures.py | 3,308 | Copyright 2016, VIXL authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of ARM Limited nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 1,482 | en | 0.890503 |
from batchgenerators.utilities.file_and_folder_operations import *
import numpy as np
if __name__ == '__main__':
# input_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_plans_3D.pkl'
# output_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_LISA_plans_3D.pkl'
# a = load_pickle(input_file)
# a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size']))
# save_pickle(a, output_file)
input_file = '../../data/nnUNet_preprocessed/Task100_LiTSbaseline/nnUNetPlansv2.1_plans_3D.pkl'
output_file = '../../data/nnUNet_preprocessed/Task100_LiTSbaseline/nnUNetPlansv2.1_plans_3D.pkl'
a = load_pickle(input_file)
print(a['plans_per_stage'])
# a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size']))
a['plans_per_stage'][0]['patch_size'] = np.array([128, 128, 128])
a['plans_per_stage'][1]['patch_size'] = np.array([128, 128, 128])
a['plans_per_stage'][0]['num_pool_per_axis'] = np.array([5, 5, 5])
a['plans_per_stage'][1]['num_pool_per_axis'] = np.array([5, 5, 5])
a['plans_per_stage'][0]['pool_op_kernel_sizes'] = [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
a['plans_per_stage'][1]['pool_op_kernel_sizes'] = [[2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2], [2, 2, 2]]
a['plans_per_stage'][0]['conv_kernel_sizes'] = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]
a['plans_per_stage'][1]['conv_kernel_sizes'] = [[3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3], [3, 3, 3]]
save_pickle(a, output_file) | nnunet/experiment_planning/change_batch_size.py | 1,657 | input_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_plans_3D.pkl' output_file = '/home/fabian/data/nnUNet_preprocessed/Task004_Hippocampus/nnUNetPlansv2.1_LISA_plans_3D.pkl' a = load_pickle(input_file) a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size'])) save_pickle(a, output_file) a['plans_per_stage'][0]['batch_size'] = int(np.floor(6 / 9 * a['plans_per_stage'][0]['batch_size'])) | 467 | en | 0.430093 |
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import RK45
f_out = "E:\\1\\P_rk4.txt" # address file for output
f2 = open(f_out,"w+")
def du_dx(x,y):
wa=1 # atomic frequency
wp=0.6 # field frequency
g=0.6 # coupling strength
n = 1 # number of photons
A = n*wp+(wa/2)
B = (1+n)*wp-(wa/2)
X = n+1
C = np.sqrt(X)
dydx_1= A*y[1]+g*C*y[3]
dydx_2= -A*y[0]-g*C*y[2]
dydx_3= B*y[3]+g*C*y[1]
dydx_4= -B*y[2]-g*C*y[0]
return [dydx_1,dydx_2,dydx_3,dydx_4]
y_0 = (1/np.sqrt(2),0,1/np.sqrt(2),0) # initial value
# print("y_0 = ",y_0)
m = 1000
ti = 0
tf = 30
h = tf/m
tspan = np.arange(ti,tf,h)
print(h)
for i in tspan:
print(i)
v = RK45(du_dx,t0 =i,y0 = y_0,t_bound=i) # 4 answer of dydx_1,...,dydx_4
print(v.y[0:])
# print(type(v))
# print("v.t[0] = ",v.t[0])
# print(len(v.t))
# print("------------------")
# print(v.y)
# print(len(v.t))
# print("------------------")
# y_1 = v.y[:,0]
# print("y_1 = ",y_1)
# print("------------------")
# y_2 = v.y[0,:]
# print("y_2 = ",y_2)
# print("------------------")
# y_3 = v.y[0,0]
# print("y_3 = ",y_3)
# print("------------------")
# # --------------------------
# # print in file
# count = 0
# while count<1000:
# y_i = v.y[:,count]
# f2.write(str(v.t[count]))
# f2.write(" ")
# for i in y_i:
# i = round(i,4)
# i = str(i)
# f2.write(i)
# f2.write(len(i)*" ")
# f2.write("\n")
# count = count+1
# # y_prime = u_s[:,1]
# # print(y_prime)
# plt.plot(v.t, v.y[0,:],'-', label='r(t)')
# plt.xlabel("x")
# plt.ylabel("y")
# plt.show() | RK45 - Copy.py | 1,642 | address file for output atomic frequency field frequency coupling strength number of photons initial value print("y_0 = ",y_0) 4 answer of dydx_1,...,dydx_4 print(type(v)) print("v.t[0] = ",v.t[0]) print(len(v.t)) print("------------------") print(v.y) print(len(v.t)) print("------------------") y_1 = v.y[:,0] print("y_1 = ",y_1) print("------------------") y_2 = v.y[0,:] print("y_2 = ",y_2) print("------------------") y_3 = v.y[0,0] print("y_3 = ",y_3) print("------------------") -------------------------- print in file count = 0 while count<1000: y_i = v.y[:,count] f2.write(str(v.t[count])) f2.write(" ") for i in y_i: i = round(i,4) i = str(i) f2.write(i) f2.write(len(i)*" ") f2.write("\n") count = count+1 y_prime = u_s[:,1] print(y_prime) plt.plot(v.t, v.y[0,:],'-', label='r(t)') plt.xlabel("x") plt.ylabel("y") plt.show() | 905 | en | 0.348858 |
"""
Utility functions for the btcpayserver client
"""
import pickle
from app.db import get_db
from config import Config
def get_client():
"""
Loads the serialized client from database
"""
db = get_db()
pickled_client = db.execute(
"SELECT pickled_client FROM btc_pay_server_client ORDER BY id"
).fetchone()
return pickle.loads(pickled_client['pickled_client'])
def create_invoice(price=Config.TIP_AMOUNT, currency=Config.TIP_CURRENCY, order_id=None, desc=None, notification_url=None, redirect_url=None):
"""
Creates a new invoice and returns invoice id
:param price: a given price (default is bitcoin)
:param currency: currency ticker from bitpay API: 'USD', 'EUR', 'BTC' etc
:return: invoice_id -> str
"""
client = get_client()
try:
new_invoice = client.create_invoice(
{
'price': price,
'currency': currency,
'orderId': order_id,
'itemDesc': desc,
'notificationUrl': notification_url,
'redirectUrl': redirect_url
}
)
return new_invoice['id']
except Exception as e:
print(e)
return 'XXX'
def get_invoice(invoice_id: str):
"""
Get an invoice by ID
"""
client = get_client()
return client.get_invoice(invoice_id)
def get_most_recent_invoice():
"""
Returns the most return invoice created
"""
client = get_client()
return client.get_invoices()[:1]
| app/btcpayserver_helper.py | 1,526 | Creates a new invoice and returns invoice id
:param price: a given price (default is bitcoin)
:param currency: currency ticker from bitpay API: 'USD', 'EUR', 'BTC' etc
:return: invoice_id -> str
Loads the serialized client from database
Get an invoice by ID
Returns the most return invoice created
Utility functions for the btcpayserver client | 343 | en | 0.693527 |
# Default forward-weights initializer string, evaluated by RETURNN
# (TF variance-scaling init, fan-in mode, uniform distribution, scale 0.78).
DEFAULT_INIT = "variance_scaling_initializer(mode='fan_in', distribution='uniform', scale=0.78)"
# Patching in some alternate conformer architectures
def add_SE_block(network, in_layer, name_prefix, se_act="swish", se_reduce_dim=32, model_dim=256):
    # This adds an SE (squeeze-and-excitation) block anywhere in the network:
    # mean-pool over time, bottleneck linear -> activation -> linear back to
    # model_dim, then gate the input by elementwise multiplication.
    """Insert a squeeze-and-excitation block into a RETURNN network dict.

    :param dict network: RETURNN network definition, mutated in place
    :param str|list in_layer: name of the layer the SE block gates
    :param str name_prefix: prefix for all layer names created here
    :param str se_act: activation inside the bottleneck (default "swish")
    :param int se_reduce_dim: bottleneck width (previously hard-coded to 32)
    :param int model_dim: excitation output width; must match the feature
        dim of ``in_layer`` (previously hard-coded to 256)
    :return: name of the SE block's output layer
    :rtype: str
    """
    network[name_prefix + "_SE_reduce"] = {
        "class" : "reduce",
        "mode" : "mean",
        "from" : in_layer,
        "axes" : "T"
    }
    network[name_prefix + "_SE_linear1"] = {
        "class" : "linear",
        "from" : name_prefix + "_SE_reduce",
        "n_out" : se_reduce_dim
    }
    network[name_prefix + "_SE_act1"] = {
        "class" : "activation",
        "activation" : se_act,
        "from" : name_prefix + "_SE_linear1"
    }
    network[name_prefix + "_SE_linear2"] = {
        "class" : "linear",
        "from" : name_prefix + "_SE_act1",
        "n_out" : model_dim
    }
    # Gate: broadcast the time-pooled excitation back over the input.
    network[name_prefix + "_SE_elm_mul"] = {
        "class" : "eval",
        "eval" : "source(0) * source(1)",
        "from" : [name_prefix + "_SE_linear2", in_layer]
    }
    return name_prefix + "_SE_elm_mul"
def conformer_enc_layer_all_in_one_SE(
network, name, num_heads, model_dim, key_dim, value_dim, ff_dim,
kernel_size,
sa_dropout, sa_post_dropout, ff_activation_dropout, ff_post_dropout,
from_layers, conv_post_dropout,
initialization=DEFAULT_INIT, ff_activation="swish",
end_layernorm=False,
normal_conv=False, output_channels=16,
kernel_size_for_feature=3,
attention_left_only=False, separated=False,
windowing=False, window_size=None, gauss_window=False,
relative_pe=False, fixed=False, clipping=100, untied_pe=False, relative_pe_transformer_xl=False,
linear_mapping = True, linear_mapping_bias = False, switch = False,
energy_factor = -0.5,
half_ratio = 0.5,
half_ratio_levels = None,
with_se = True,
se_pos = None,
se_act = "swish"
):
if windowing or untied_pe or relative_pe_transformer_xl or energy_factor != -0.5:
assert separated
if with_se:
assert not se_pos is None, "this version needs se_pos != None"
if half_ratio_levels is not None:
idx = int(name.split("_")[-1]) - 1 # Hack but does the trick
half_ratio = half_ratio_levels[idx]
if from_layers is None:
from_layers = ["data"]
elif isinstance(from_layers, str):
from_layers = [from_layers]
## first ffn with residual connection
network[f"{name}_ff1_laynorm"] = {'class': "layer_norm",
'from': from_layers}
network[f"{name}_ff1_conv1"] = {
'class': "linear", 'activation': ff_activation, 'with_bias': True,
'from': [f"{name}_ff1_laynorm"],
'n_out': ff_dim, 'forward_weights_init': initialization
}
network[f"{name}_ff1_conv2"] = {
'class': "linear", 'activation': None, 'with_bias': True,
'from': [f"{name}_ff1_conv1"], 'dropout': ff_activation_dropout,
'n_out': model_dim, 'forward_weights_init': initialization
}
network[f"{name}_ff1_drop"] = {'class': "dropout",
'dropout': ff_post_dropout,
'from': [f"{name}_ff1_conv2"]}
network[f"{name}_ff1_drop_half"] = {
'class': "eval",
'eval': f"{half_ratio} * source(0)",
'from': [f"{name}_ff1_drop"]
}
network[f"{name}_ff1_out"] = {
'class': "combine", 'kind': "add",
'from': from_layers + [f"{name}_ff1_drop_half"]
}
## MHSA module
network[f"{name}_self_att_laynorm"] = {'class': "layer_norm",
'from': [f"{name}_ff1_out"]}
if separated:
key_per_head = int(key_dim / num_heads)
value_per_head = int(value_dim / num_heads)
network[f"{name}_att_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_self_att_laynorm"], 'n_out': key_dim,
'forward_weights_init': initialization
}
# query per head
network[f"{name}_att_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': [f"{name}_att_query0"],
}
network[f"{name}_att_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_self_att_laynorm"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network[f"{name}_att_value0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_self_att_laynorm"], 'n_out': value_dim,
'forward_weights_init': initialization}
## split the key and value vectors for each head
network[f"{name}_att_key"] = {
'class': "split_dims", 'axis': "F", 'dims': (num_heads, key_per_head),
'from': [f"{name}_att_key0"], # (B, enc-T, H, D/H)
}
network[f"{name}_att_value"] = {
'class': "split_dims", 'axis': "F", 'dims': (num_heads, value_per_head),
'from': [f"{name}_att_value0"], # (B, enc-T, H, D'/H)
}
## encoder-decoder energy
## we have exactly enc-T energy values
network[f"{name}_att_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_key", f"{name}_att_query"]} # (B, H, key-T, query-T)
## normalize the attention weights (depends on key/query dim.)
network[f"{name}_att_weights"] = {
'class': "softmax_over_spatial", 'from': [f"{name}_att_energy"],
'energy_factor': key_per_head ** energy_factor, # (B, H, key-T, query-T), key-T is where softmax is performed
}
# relative_pe as in transformer xl
if relative_pe_transformer_xl and not relative_pe and not untied_pe:
shared_layers = False
network[f"{name}_att_emb_emb"] = network[f"{name}_att_energy"]
# (B, enc-T, d_pos)
assert 'source' in network
if 'pos' not in network:
network["pos"] = {
'class': "positional_encoding",
'add_to_input': False,
'from': ["source"],
'n_out': model_dim
}
# network['pos_with_0'] = {
# "class": "eval", "from": ["pos"],
# "eval": f"tf.slice(tf.concat([tf.expand_dims(tf.tile(tf.reshape([0, 1] * ({model_dim}//2), " \
# f"(1, {model_dim})), [tf.shape(source(0))[0], 1]), 1), source(0)], 1), [0, 0, 0], [-1, tf.shape(source(0))[1], -1])"}
if shared_layers:
network["att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ['pos'], 'n_out': key_dim, # (B, enc-T, D) # pos_with_0
'forward_weights_init': initialization,
}
network["att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': ["att_pos_key0"], # (B, enc-T, H, D/H)
}
else:
network[f"{name}_att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ['pos'], 'n_out': key_dim, # (B, enc-T, D) # pos_with_0
'forward_weights_init': initialization,
}
network[f"{name}_att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': [f"{name}_att_pos_key0"], # (B, enc-T, H, D/H)
}
# (B, enc-T, H, D/H), (B, dec-T, H, D/H) -> (B, H, enc-T, dec-T)
network[f"{name}_att_emb_pos"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_pos_key", f"{name}_att_query"]
}
if shared_layers:
network[f"{name}_att_emb_pos"]['from'] = ["att_pos_key", f"{name}_att_query"]
# (B, H, enc-T, dec-T)
network[f"{name}_att_emb_pos_shifted"] = {
'class': "eval",
'eval': "self.network.get_config().typed_value('rel_shift')(source(0))",
'from': [f"{name}_att_emb_pos"],
'out_type': {'shape': (num_heads, None, None),
'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1}
}
# (B, 4, F)
if shared_layers:
network["pos_emb_bias"] = {
'class': "variable",
'shape': (num_heads, key_per_head),
'add_time_axis': True,
'init': DEFAULT_INIT
}
else:
network[f"{name}_pos_emb_bias"] = {
'class': "variable",
'shape': (num_heads, key_per_head),
'add_time_axis': True,
'init': DEFAULT_INIT
}
# (B, enc-T, H, D / H), (B, 1, H, D / H) --> (B, H, enc-T, dec-T=1)
network[f"{name}_att_pos_emb"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_key", f"{name}_pos_emb_bias"],
'out_type': {'shape': (num_heads, None, 1)}
#'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, "dim": num_heads}
}
if shared_layers:
network[f"{name}_att_pos_emb"]['from'] = [f"{name}_att_key", "pos_emb_bias"]
network[f"{name}_att_pos_emb_tiled"] = {
'class': "rel_shift",
'rel_shift': False,
'from': [f"{name}_att_pos_emb"],
'out_type': {'shape': (num_heads, None, None),
'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, 'dim': num_heads}
}
if shared_layers:
network["pos_pos_bias"] = {
'class': "variable",
'shape': (num_heads, key_per_head), # (B, d, 4)
'add_time_axis': True,
'init': DEFAULT_INIT
}
# (B, enc - T, H, D / H), (B, 1, H, D / H) --> (B, H, enc-T, dec-T = 1)
network["att_pos_pos"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': ["att_pos_key", "pos_pos_bias"],
'out_type': {'shape': (num_heads, None, 1)}
# 'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, "dim": num_heads}
}
# (B, H, T, T')
network["att_pos_pos_shifted"] = {
'class': "rel_shift",
'from': ["att_pos_pos"],
'out_type': {'shape': (num_heads, None, None),
'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, 'dim': num_heads}
}
else:
network[f"{name}_pos_pos_bias"] = {
'class': "variable",
'shape': (num_heads, key_per_head), #(B, d, 4)
'add_time_axis': True,
'init': DEFAULT_INIT
}
# (B, enc - T, H, D / H), (B, 1, H, D / H) --> (B, H, enc-T, dec-T = 1)
network[f"{name}_att_pos_pos"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_pos_key", f"{name}_pos_pos_bias"],
'out_type': {'shape': (num_heads, None, 1)}
#'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, "dim": num_heads}
}
# (B, H, T, T')
network[f"{name}_att_pos_pos_shifted"] = {
'class': "rel_shift",
'from': [f"{name}_att_pos_pos"],
'out_type': {'shape': (num_heads, None, None),
'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, 'dim': num_heads}
}
network[f"{name}_att_energy"] = {
'class': "combine",
'kind': "add",
'from': [f"{name}_att_emb_emb", f"{name}_att_pos_emb_tiled",
f"{name}_att_emb_pos_shifted", f"{name}_att_pos_pos_shifted"]
}
if shared_layers:
network[f"{name}_att_energy"]['from'] = [f"{name}_att_emb_emb", f"{name}_att_pos_emb_tiled",
f"{name}_att_emb_pos_shifted", "att_pos_pos_shifted"]
if untied_pe and not relative_pe:
assert 'source' in network
if 'pos' not in network:
network["pos"] = {
'class': "positional_encoding",
'add_to_input': False,
'from': ["source"],
'n_out': model_dim
}
# shared
if False:
if 'att_pos_query0' not in network:
network["att_pos_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim,
'forward_weights_init': initialization}
network["att_pos_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': ["att_pos_query0"],
}
network["att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network["att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': ["att_pos_key0"], # (B, enc-T, H, D/H)
}
network["att_pos_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': ["att_pos_key", "att_pos_query"]}
network[f"{name}_att_energy_with_pos_corr"] = {
'class': "combine",
'kind': "add",
'from': [f"{name}_att_energy", "att_pos_energy"]
}
# per layer
if False:
network[f"{name}_att_pos_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim,
'forward_weights_init': initialization}
network[f"{name}_att_pos_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': [f"{name}_att_pos_query0"],
}
network[f"{name}_att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network[f"{name}_att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': [f"{name}_att_pos_key0"], # (B, enc-T, H, D/H)
}
network[f"{name}_att_pos_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_pos_key", f"{name}_att_pos_query"]}
network[f"{name}_att_energy_with_pos_corr"] = {
'class': "combine",
'kind': "add",
'from': [f"{name}_att_energy", f"{name}_att_pos_energy"]
}
# with corrected normalization factor
if True:
network[f"{name}_att_pos_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim,
'forward_weights_init': initialization}
network[f"{name}_att_pos_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': [f"{name}_att_pos_query0"],
}
network[f"{name}_att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network[f"{name}_att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': [f"{name}_att_pos_key0"], # (B, enc-T, H, D/H)
}
network[f"{name}_att_pos_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': [f"{name}_att_pos_key", f"{name}_att_pos_query"]}
network[f"{name}_att_energy_with_pos_corr"] = {
'class': "combine",
'kind': "add",
'from': [f"{name}_att_energy", f"{name}_att_pos_energy"]
}
network[f"{name}_att_weights"]['energy_factor'] = (2 * key_per_head) ** energy_factor
# scale per layer
if False:
if 'att_pos_query0' not in network:
network["att_pos_query0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim,
'forward_weights_init': initialization}
network["att_pos_query"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head), # (B, T, H, D/H)
'from': ["att_pos_query0"],
}
network["att_pos_key0"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': ["pos"], 'n_out': key_dim, # (B, enc-T, D)
'forward_weights_init': initialization,
}
network["att_pos_key"] = {
'class': "split_dims", 'axis': "F",
'dims': (num_heads, key_per_head),
'from': ["att_pos_key0"], # (B, enc-T, H, D/H)
}
network["att_pos_energy"] = {
'class': "dot", 'red1': -1, 'red2': -1, 'var1': "T", 'var2': "T?",
'from': ["att_pos_key", "att_pos_query"]}
network[f"{name}_att_pos_energy_scale"] = {
'class': 'variable',
'shape': (num_heads,),
'init': 1.0,
'add_batch_axis': False
}
network[f"{name}_att_energy_with_pos_corr"] = {
'class': "eval",
'eval': f"tf.add(source(0), tf.multiply(source(1), tf.reshape(source(2), (1, {num_heads}, 1, 1))))",
'from': [f"{name}_att_energy", "att_pos_energy", f"{name}_att_pos_energy_scale"]
}
network[f"{name}_att_weights"]["from"] = [f"{name}_att_energy_with_pos_corr"]
## attention weights dropout
network[f"{name}_att_weights_drop"] = {
'class': "dropout", 'dropout_noise_shape': {'*': None},
'dropout': sa_dropout, 'from': [f"{name}_att_weights"],
}
## now we have an attention weight value for each encoder-side output
## we get per head one vector
network[f"{name}_att0"] = {
'class': "generic_attention", 'weights': f"{name}_att_weights_drop",
'base': f"{name}_att_value", # (B, T, H, V) #(B, H, V)
}
network[f"{name}_self_att_att"] = {
'class': "merge_dims", 'axes': "static", # "static"
'from': [f"{name}_att0"]
}
## not sure, if this works
if windowing:
#hard masking
if not gauss_window:
eval_win_size = f'tf.expand_dims(tf.tile(tf.expand_dims(tf.expand_dims(tf.constant({window_size}, dtype=tf.int32), axis = -1), axis = -1), '\
f'[1, tf.shape(source(0))[-2], tf.shape(source(0))[-1]]), 0)'
eval_win_start = f'tf.expand_dims(tf.map_fn(fn = lambda t: tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-1]), 0), '\
f'[tf.shape(source(0))[2], 1]) - t, elems=tf.constant({window_size}, dtype=tf.int32)//2), 0)'
# eval_encoderT_pos = 'tf.tile(tf.expand_dims(tf.expand_dims(tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-2]), -1), '\
# '[1, tf.shape(source(0))[-1]]), 0), 0), [1, tf.shape(source(0))[1], 1, 1])'
eval_encoderT_pos = 'tf.expand_dims(tf.reshape(tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-2]), -1), '\
'[tf.shape(source(0))[1], tf.shape(source(0))[-1]]), tf.shape(source(0))[1:]), 0)'
# without batch dim.
#eval_masking = 'tf.logical_and(tf.less_equal(source(0), source(1)), tf.greater_equal(source(0), source(2)))'
eval_masking = 'tf.tile(tf.logical_and(tf.less_equal(source(0), source(1)), tf.greater_equal(source(0), source(2))), '\
'[tf.shape(source(3))[0], 1, 1, 1])'
network[f"{name}_att_energy"]['out_type'] = {'time_dim_axis': 3}
network[f"{name}_win_size"] = {
'class': 'eval',
'eval': eval_win_size,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'int32'}
}
network[f"{name}_win_start"] = {
'class': 'eval',
'eval': eval_win_start,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'int32'}
}
## normalize the attention weights (depends on key/query dim.)
# network[f"{name}_att_weights"]['window_start'] = f"{name}_win_start"
# network[f"{name}_att_weights"]['window_size'] = f"{name}_win_size"
network[f"{name}_win_end"] = {
'class': 'combine',
'from': [f"{name}_win_start", f"{name}_win_size"],
'kind': 'add'
}
network[f"{name}_encoderT_pos"] = {
'class': 'eval',
'eval': eval_encoderT_pos,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'int32'}
}
network[f"{name}_masking"] = {
'class': 'eval',
'eval': eval_masking,
'from': [f"{name}_encoderT_pos", f"{name}_win_end", f"{name}_win_start", f"{name}_att_energy"],
'out_type': {'dtype': 'bool'}
}
network[f"{name}_att_energy_masked"] = {
'class': 'eval',
'eval': f"tf.where(source(0), source(1), "\
f"tf.tile(tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.expand_dims(tf.constant(float('-inf')), 0), 0), 0), 0), tf.shape(source(1))))",
'from': [f"{name}_masking", f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
#soft masking: Gaussian window
else:
eval_key_pos = 'tf.cast(tf.expand_dims(tf.reshape(tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-2]), -1), ' \
'[tf.shape(source(0))[1], tf.shape(source(0))[-1]]), tf.shape(source(0))[1:]), 0), "float32")'
eval_query_pos = f'tf.cast(tf.expand_dims(tf.tile(tf.expand_dims(tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-1]), 0), '\
f'[tf.shape(source(0))[-2], 1]), 0), [{num_heads}, 1, 1]), 0), "float32")'
network[f"{name}_key_pos"] = {
'class': 'eval',
'eval': eval_key_pos,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
network[f"{name}_query_pos"] = {
'class': 'eval',
'eval': eval_query_pos,
'from': [f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
network[f"{name}_std_for_gaussian_window"] = {
'class': 'variable',
'init': window_size[0],
'shape': (num_heads,)
}
network[f"{name}_masking"] = {
'class': 'eval',
'eval': f'{half_ratio} * tf.square(source(0) - source(1)) / tf.reshape(tf.square(source(2)), [tf.shape(source(3))[0], {num_heads}, 1, 1])',
'from': [f"{name}_query_pos", f"{name}_key_pos", f"{name}_std_for_gaussian_window", f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
network[f"{name}_att_energy_masked"] = {
'class': 'combine',
'kind': 'add',
'from': [f"{name}_masking", f"{name}_att_energy"],
'out_type': {'dtype': 'float32'}
}
network[f"{name}_att_weights"]['from'] = [f"{name}_att_energy_masked"]
network[f"{name}_att_weights"]['use_time_mask'] = False
else:
network[f"{name}_self_att_att"] = {
'class': "self_attention", 'num_heads': num_heads,
'total_key_dim': key_dim, 'n_out': value_dim,
'from': [f"{name}_self_att_laynorm"],
'attention_left_only': attention_left_only,
'attention_dropout': sa_dropout,
'forward_weights_init': initialization,
}
if relative_pe:
network[f"{name}_rel_pos"] = {
"class": "relative_positional_encoding",
"from": [f"{name}_self_att_laynorm"],
"fixed": fixed,
"clipping": clipping,
"n_out": key_dim // num_heads,
"forward_weights_init": initialization
}
network[f"{name}_self_att_att"]["key_shift"] = f"{name}_rel_pos"
if linear_mapping:
network[f"{name}_self_att_lin"] = {
'class': "linear", 'activation': None, 'with_bias': linear_mapping_bias,
'from': [f"{name}_self_att_att"], 'n_out': model_dim,
'forward_weights_init': initialization
}
network[f"{name}_self_att_drop"] = {
'class': "dropout", 'dropout': sa_post_dropout,
'from': [f"{name}_self_att_lin"]
}
else:
network[f"{name}_self_att_drop"] = {
'class': "dropout", 'dropout': sa_post_dropout,
'from': [f"{name}_self_att_att"]
}
network[f"{name}_self_att_out"] = {
'class': "combine", 'kind': "add",
'from': [f"{name}_ff1_out", f"{name}_self_att_drop"],
'n_out': model_dim
}
## convolution module
network[f"{name}_conv_laynorm"] = {'class': "layer_norm",
'from': [f"{name}_self_att_out"]}
## d --> 2d for GLU activation
## can linear as an alternative to pointwise conv.?
network[f"{name}_conv_pointwise1"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_conv_laynorm"], 'n_out': 2 * model_dim,
'forward_weights_init': initialization
}
## (batch, time, feature)
network[f"{name}_conv_GLU"] = {
'class': "gating",
'activation': "identity",
'from': [f"{name}_conv_pointwise1"]
}
out_layer_name = f"{name}_conv_GLU"
if se_pos == "after_first_conv":
# TODO: implement
inpl = f"{name}_conv_GLU"
out_layer_name = add_SE_block(network, inpl, name, se_act)
if normal_conv:
network[f"{name}_conv_expanded"] = {
"class": "split_dims", "axis": "F", "dims": (-1, 1),
"from": [out_layer_name]
}
## (T, F, 1)
network[f"{name}_conv_normal"] = {
"class": "conv",
"from": [f"{name}_conv_expanded"], "padding": "same",
"filter_size": (kernel_size, kernel_size_for_feature),
"n_out": output_channels, "activation": None, "with_bias": True #model_dim//kernel_size
}
network[f"{name}_conv_normal_flattened"] = {
"class": "merge_dims",
"from": [f"{name}_conv_normal"],
"axes": "static"
}
## parameter intensiv
network[f"{name}_conv_transformed"] = {
'class': "linear",
'activation': None,
'with_bias': False,
'forward_weights_init': initialization,
'n_out': model_dim,
"from": [f"{name}_conv_normal_flattened"]
}
network[f"{name}_conv_batchnorm"] = {
'class': "batch_norm",
'from': [f"{name}_conv_transformed"]
}
else:
network[f"{name}_conv_depthwise"] = {
'activation': None,
'class': 'conv',
'filter_size': (kernel_size,),
'from': [out_layer_name],
'groups': model_dim,
'n_out': model_dim,
'padding': 'same',
'with_bias': True
}
out_layer_name = f"{name}_conv_depthwise"
if se_pos == "after_depthwise_conv":
# TODO: implement
inpl = f"{name}_conv_depthwise"
out_layer_name = add_SE_block(network, inpl, name, se_act)
network[f"{name}_conv_batchnorm"] = {
'class': "batch_norm",
'from': [out_layer_name]
}
network[f"{name}_conv_act"] = {
'class': "activation",
'activation': "swish",
'from': [f"{name}_conv_batchnorm"]
}
network[f"{name}_conv_pointwise2"] = {
'class': "linear", 'activation': None, 'with_bias': False,
'from': [f"{name}_conv_act"], 'n_out': model_dim,
'forward_weights_init': initialization
}
out_layer_name = f"{name}_conv_pointwise2"
if se_pos == "after_sec_conv":
# TODO: implement
inpl = f"{name}_conv_pointwise2"
out_layer_name = add_SE_block(network, inpl, name, se_act)
network[f"{name}_conv_dropout"] = {
'class': "dropout", 'dropout': conv_post_dropout,
'from': [out_layer_name],
}
network[f"{name}_conv_output"] = {
'class': "combine", 'kind': "add",
'from': [f"{name}_self_att_out", f"{name}_conv_dropout"], 'n_out': model_dim,
}
## second ffn layer
network[f"{name}_ff2_laynorm"] = {'class': "layer_norm",
'from': [f"{name}_conv_output"]}
network[f"{name}_ff2_conv1"] = {
'class': "linear", 'activation': ff_activation, 'with_bias': True,
'from': [f"{name}_ff2_laynorm"],
'n_out': ff_dim, 'forward_weights_init': initialization
}
network[f"{name}_ff2_conv2"] = {
'class': "linear", 'activation': None, 'with_bias': True,
'from': [f"{name}_ff2_conv1"], 'dropout': ff_activation_dropout,
'n_out': model_dim, 'forward_weights_init': initialization
}
network[f"{name}_ff2_drop"] = {'class': "dropout",
'dropout': ff_post_dropout,
'from': [f"{name}_ff2_conv2"]}
network[f"{name}_ff2_drop_half"] = {
'class': "eval",
'eval': f"{half_ratio} * source(0)",
'from': [f"{name}_ff2_drop"]
}
network[f"{name}_ff2_out"] = {
'class': "combine", 'kind': "add",
'from': [f"{name}_conv_output", f"{name}_ff2_drop_half"]
}
if switch:
network[f"{name}_conv_output"]['from'] = [f"{name}_ff1_out", f"{name}_conv_dropout"]
network[f"{name}_conv_laynorm"]['from'] = [f"{name}_ff1_out"]
network[f"{name}_self_att_laynorm"]['from'] = [f"{name}_conv_output"]
network[f"{name}_self_att_out"]['from'] = [f"{name}_conv_output", f"{name}_self_att_drop"]
network[f"{name}_ff2_laynorm"]['from'] = [f"{name}_self_att_out"]
network[f"{name}_ff2_out"]['from'] = [f"{name}_self_att_out", f"{name}_ff2_drop_half"]
## final layer norm
if end_layernorm:
network[f"{name}"] = {
'class': "layer_norm",
'from': [f"{name}_ff2_out"]
}
else:
network[f"{name}"] = {
'class': "copy",
'from': [f"{name}_ff2_out"]
}
| users/schupp/hybrid_hmm_nn/network_builders/layers/conformer_SE_block_layer_dynamic_oneact.py | 29,411 | Patchining in some alternate conformer arcitectures This adds and SE block anywhere Returns the output layer name Hack but does the trick first ffn with residual connection MHSA module query per head (B, T, H, D/H) (B, enc-T, D) split the key and value vectors for each head (B, enc-T, H, D/H) (B, enc-T, H, D'/H) encoder-decoder energy we have exactly enc-T energy values (B, H, key-T, query-T) normalize the attention weights (depends on key/query dim.) (B, H, key-T, query-T), key-T is where softmax is performed relative_pe as in transformer xl (B, enc-T, d_pos) network['pos_with_0'] = { "class": "eval", "from": ["pos"], "eval": f"tf.slice(tf.concat([tf.expand_dims(tf.tile(tf.reshape([0, 1] * ({model_dim}//2), " \ f"(1, {model_dim})), [tf.shape(source(0))[0], 1]), 1), source(0)], 1), [0, 0, 0], [-1, tf.shape(source(0))[1], -1])"} (B, enc-T, D) pos_with_0 (B, enc-T, H, D/H) (B, enc-T, D) pos_with_0 (B, enc-T, H, D/H) (B, enc-T, H, D/H), (B, dec-T, H, D/H) -> (B, H, enc-T, dec-T) (B, H, enc-T, dec-T) (B, 4, F) (B, enc-T, H, D / H), (B, 1, H, D / H) --> (B, H, enc-T, dec-T=1)'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, "dim": num_heads} (B, d, 4) (B, enc - T, H, D / H), (B, 1, H, D / H) --> (B, H, enc-T, dec-T = 1) 'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, "dim": num_heads} (B, H, T, T')(B, d, 4) (B, enc - T, H, D / H), (B, 1, H, D / H) --> (B, H, enc-T, dec-T = 1)'batch_dim_axis': 0, 'time_dim_axis': 2, "feature_dim_axis": 1, "dim": num_heads} (B, H, T, T') shared (B, T, H, D/H) (B, enc-T, D) (B, enc-T, H, D/H) per layer (B, T, H, D/H) (B, enc-T, D) (B, enc-T, H, D/H) with corrected normalization factor (B, T, H, D/H) (B, enc-T, D) (B, enc-T, H, D/H) scale per layer (B, T, H, D/H) (B, enc-T, D) (B, enc-T, H, D/H) attention weights dropout now we have an attention weight value for each encoder-side output we get per head one 
vector (B, T, H, V) (B, H, V) "static" not sure, if this workshard masking eval_encoderT_pos = 'tf.tile(tf.expand_dims(tf.expand_dims(tf.tile(tf.expand_dims(tf.range(tf.shape(source(0))[-2]), -1), '\ '[1, tf.shape(source(0))[-1]]), 0), 0), [1, tf.shape(source(0))[1], 1, 1])' without batch dim.eval_masking = 'tf.logical_and(tf.less_equal(source(0), source(1)), tf.greater_equal(source(0), source(2)))' normalize the attention weights (depends on key/query dim.) network[f"{name}_att_weights"]['window_start'] = f"{name}_win_start" network[f"{name}_att_weights"]['window_size'] = f"{name}_win_size"soft masking: Gaussian window convolution module d --> 2d for GLU activation can linear as an alternative to pointwise conv.? (batch, time, feature) TODO: implement (T, F, 1)model_dim//kernel_size parameter intensiv TODO: implement TODO: implement second ffn layer final layer norm | 2,790 | en | 0.448129 |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
def load_arguments(self, _):
    """Load CLI argument definitions for this command group (none defined yet)."""
    pass
| src/fidalgo/azext_fidalgo/generated/_params.py | 568 | -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- pylint: disable=too-many-lines pylint: disable=too-many-statements | 506 | en | 0.551802 |
import os
import re
import json
import uuid
from string import Template
from iocbuilder.iocinit import IocDataStream
def debug_print(message, level):
    """Print *message* when the ODIN_BUILDER_DEBUG env var is at least *level*."""
    verbosity = int(os.getenv("ODIN_BUILDER_DEBUG", 0))
    if verbosity >= level:
        print(message)
# Module root: two directory levels above this file's directory.
ADODIN_ROOT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../.."))
# Directory containing the template/data files shipped with this module.
ADODIN_DATA = os.path.join(ADODIN_ROOT, "data")
def data_file_path(file_name):
    """Return the absolute path of *file_name* inside the ADODIN_DATA directory."""
    return os.path.join(ADODIN_DATA, file_name)
class OdinPaths(object):
    """Module paths resolved from configure/RELEASE files, held as class attributes."""

    @classmethod
    def configure_paths(cls, release_path):
        """Populate class-level path attributes from the RELEASE file at *release_path*.

        Raises EnvironmentError if any *_DETECTOR module was built against a
        different odin-data checkout than the top-level RELEASE declares.
        """
        macros = cls.parse_release_file(release_path)
        cls.HDF5_FILTERS = os.path.join(macros["HDF5_FILTERS"], "prefix/hdf5_1.10/h5plugin")
        cls.ODIN_DATA = macros["ODIN_DATA"]

        detector_roots = [value for key, value in macros.items() if key.endswith("DETECTOR")]
        for root in detector_roots:
            dependencies = cls.parse_release_file(os.path.join(root, "configure/RELEASE"))
            if dependencies["ODIN_DATA"] != cls.ODIN_DATA:
                raise EnvironmentError("Mismatched odin-data dependency in {}".format(root))

        cls.EIGER_DETECTOR = macros["EIGER_DETECTOR"]
        cls.EXCALIBUR_DETECTOR = macros["EXCALIBUR_DETECTOR"]
        cls.TRISTAN_DETECTOR = macros["TRISTAN_DETECTOR"]

    @classmethod
    def parse_release_file(cls, release_path):
        """Parse KEY=VALUE lines from *release_path*, expanding $(MACRO) references.

        Expansion is a single pass over the parsed entries; lines without an
        '=' are ignored.
        """
        macros = {}
        with open(release_path) as release_file:
            for raw_line in release_file:
                if "=" not in raw_line:
                    continue
                key, _, value = raw_line.partition("=")
                macros[key.strip()] = value.strip()

        reference = re.compile(r"\$\(([^\)]+)\)")
        for key in macros:
            for ref in reference.findall(macros[key]):
                if ref in macros:
                    macros[key] = macros[key].replace("$({})".format(ref), macros[ref])
        return macros
# Read Odin paths on import so OdinPaths class attributes are populated for
# every importer; fails fast here if configure/RELEASE.local is missing.
OdinPaths.configure_paths(
    os.path.join(ADODIN_ROOT, "configure/RELEASE.local")
)
def expand_template_file(template, macros, output_file, executable=False):
    """Expand a template from ADODIN_DATA with *macros* and write an IOC data stream.

    Args:
        template: Template file name, relative to the ADODIN_DATA directory.
        macros: Mapping of template placeholders to their substitution values.
        output_file: Name of the output file to create via IocDataStream.
        executable: If True, create the output with mode 0o755 (rwxr-xr-x).
    """
    # 0o755 replaces the legacy "0755" literal, which is Python-2-only octal
    # syntax; 0o755 has the same value and is valid on Python 2.6+ and 3.
    if executable:
        mode = 0o755
    else:
        mode = None  # let IocDataStream choose its default mode
    with open(os.path.join(ADODIN_DATA, template)) as template_file:
        template_config = Template(template_file.read())
    output = template_config.substitute(macros)
    debug_print("--- {} ----------------------------------------------".format(output_file), 2)
    debug_print(output, 2)
    debug_print("---", 2)
    stream = IocDataStream(output_file, mode)
    stream.write(output)
def create_batch_entry(beamline, number, name):
    """Return a batch entry like 'BL01I-EA-ODN-01 stName.sh' for the given IOC."""
    pattern = "{0}-EA-ODN-{1:02d} st{2}.sh"
    return pattern.format(beamline, number, name)
class OneLineEntry(object):
    """A wrapper to stop JSON entries being split across multiple lines.

    Wrap this around lists, dictionaries, etc to stop json.dumps from
    splitting them over multiple lines. Must pass OneLineEncoder to
    json.dumps(cls=).
    """
    def __init__(self, value):
        # The wrapped value, rendered on one line by OneLineEncoder.default().
        self.value = value
class OneLineEncoder(json.JSONEncoder):
    """JSON encoder rendering OneLineEntry-wrapped values on a single line.

    Each OneLineEntry is dumped without indentation, stored under a unique
    placeholder key, and the placeholders are substituted back into the
    indented document in ``encode``.
    """

    def __init__(self, *args, **kwargs):
        super(OneLineEncoder, self).__init__(*args, **kwargs)
        self.kwargs = dict(kwargs)
        # The nested dump must not inherit the outer indent, or the wrapped
        # entry would be split over multiple lines again. pop() (rather than
        # del) tolerates callers that never passed indent in the first place.
        self.kwargs.pop("indent", None)
        self._replacement_map = {}

    def default(self, o):
        if isinstance(o, OneLineEntry):
            key = uuid.uuid4().hex
            self._replacement_map[key] = json.dumps(o.value, **self.kwargs)
            return "@@%s@@" % (key,)
        else:
            return super(OneLineEncoder, self).default(o)

    def encode(self, o):
        result = super(OneLineEncoder, self).encode(o)
        # items() instead of the Python-2-only iteritems(); identical
        # iteration behaviour on Python 2 and keeps the class Python-3 safe.
        for key, value in self._replacement_map.items():
            result = result.replace("\"@@%s@@\"" % (key,), value)
        return result
def create_config_entry(dictionary):
    """Serialise *dictionary* as indented JSON, re-indented for embedding."""
    text = json.dumps(dictionary, indent=2, cls=OneLineEncoder)
    return text.replace("\n", "\n    ")
| etc/builder/util.py | 4,035 | Read Odin paths on import | 25 | en | 0.72692 |
"""
Remote platform
This platform uses physical ethernet interfaces.
"""
# Update this mapping to suit your environment: each (device, port) key
# resolves to a physical interface name. The default maps device 0 ports
# 0..31 onto eth0..eth31; override individual entries as needed.
remote_port_map = {(0, port): "eth{0}".format(port) for port in range(32)}
def platform_config_update(config):
    """
    Update configuration for the remote platform

    @param config The configuration dictionary to use/update
    """
    # The module-level map is only read and copied here, never rebound, so
    # the previous ``global remote_port_map`` declaration was redundant and
    # has been removed. Copying keeps later mutation of config["port_map"]
    # from corrupting the module-level default.
    config["port_map"] = remote_port_map.copy()
    config["caps_table_idx"] = 0
| src/ptf/platforms/remote.py | 1,105 | Update configuration for the remote platform
@param config The configuration dictionary to use/update
Remote platform
This platform uses physical ethernet interfaces.
Update this dictionary to suit your environment. | 219 | en | 0.566956 |
import numpy as np
import pandas as pd
import torch
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from torch.utils.data import DataLoader
from sklearn.ensemble import ExtraTreesClassifier
from parameters import *
from training.evaluation import Evaluate, ClassificationRanker
from training.feature_extraction import FeatureExtraction
from training.train_loop import train_loop
from training.utils import Utils, Datasets
import models as md
# Training entry point: builds resume features, loads train/test/validation
# splits, then trains and evaluates ranking models. Runs on import.
# Define Processor
# NOTE(review): `device` comes from the `parameters` star import - presumably
# a torch.device; confirm against parameters.py.
print("1.\t" + str(device.type).capitalize() + " detected\n")
# Preprocess Data
utils = Utils()
featureExtraction = FeatureExtraction()
# validation data: extract features from resume files for validation scoring
print("2.\tProcessing Resume data for validation ...")
resume = utils.process_resumes(pth, categories, scores, query_name, feature_name)
featureExtraction.generate_features(resume, query_name, feature_name, resume_path)
# train data (one-off cleaning step, currently disabled)
print("3.\tProcessing Train data ...")
# utils.clean_save_data(data_train_path, data_test_path, data_valid_path, required_columns, clean_data_path)
# Load Data
print("4.\tLoading Data ...")
valid = utils.load_data(resume_path)
train_test = utils.load_data(clean_data_path)
# NOTE(review): output_dim hard-coded to 1 (single-score regression target);
# the commented expression suggests it was once the number of label classes.
output_dim = 1#len(train_test.y.unique())
# Train/Test Split
print("5.\tGetting Train/Test/Validation Data ...")
x_train, x_test, x_valid, y_train, y_test, y_valid, qid_train, qid_test, qid_valid = \
    utils.split_data(train_test, valid, .05)
print('6.\tTrain: {}\tTest: {}\tValid: {}\tOutput: {}'.format(x_train.shape, x_test.shape, x_valid.shape, output_dim))
print(
    '7.\tUnique Query Ids (train: {}\ttest: {}\tvalid: {})'.format(len(np.unique(qid_train)), len(np.unique(qid_test)),
                                                                   len(np.unique(qid_valid))))
# Define Model (alternative architectures kept for reference)
# model = md.RNN(x_train.shape[1], output_dim, hidden2, 2)
# model = md.Model1(x_train.shape[1], hidden1, hidden2, hidden3, output_dim)
# model = md.Model2(output_dim)
model = md.Model4(x_train.shape[1], output_dim)
model.to(device)
print("8.\tModel defined and moved to " + str(device.__str__()))
# Parameters
optimizer = Optimizer(model.parameters())
# NOTE(review): `scheduler` (a factory imported from parameters) is rebound
# here to the constructed scheduler instance - confirm shadowing is intended.
scheduler = scheduler(optimizer)
print("9.\tCriterion set as " + str(criterion.__str__()))
print("10.\tOptimizer set as " + str(optimizer.__str__()))
# Data Loader
train_dataset = Datasets(y_train, x_train, qid_train)
test_dataset = Datasets(y_test, x_test, qid_test)
valid_dataset = Datasets(y_valid, x_valid, qid_valid)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=56, shuffle=True)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)
# Peek one batch to report tensor shapes
train_qid, train_labels, train_features = next(iter(train_loader))
print("11.\tDataLoader Shapes-> QID: {}\tLabel: {}\tFeatures: {}".format(train_qid.size(), train_labels.size(),
                                                                         train_features.size()))
# NN Model (neural training loop currently disabled)
print("12.\tTrain loop")
# train_loop(model, epochs, optimizer, criterion, train_loader, test_loader, valid_loader, k_rank,
#            printing_gap, saved_model_device, model_path, device, PIK_plot_data, scheduler)
# Regressor Model (disabled alternative)
# rfr = RandomForestRegressor(n_estimators=200, min_samples_split=5, random_state=1, n_jobs=-1)
# rfr.fit(x_train, y_train)
# Evaluate().print_evaluation(rfr, x_train, y_train, qid_train, k_rank)
# Evaluate().print_evaluation(rfr, x_test, y_test, qid_test, k_rank)
# Evaluate().print_evaluation(rfr, x_valid, y_valid, qid_valid, k_rank)
# Evaluate().save_model(rfr, reg_model_path)
# SVM Model: the model actually trained, evaluated and saved by this script
sm = svm.SVR()
sm.fit(x_train, y_train)
Evaluate().print_evaluation(sm, x_train, y_train, qid_train, k_rank)
Evaluate().print_evaluation(sm, x_test, y_test, qid_test, k_rank)
Evaluate().print_evaluation(sm, x_valid, y_valid, qid_valid, k_rank)
Evaluate().save_model(sm, svm_model_path)
# Classifier Model (disabled alternative)
# etc = ClassificationRanker(LogisticRegression(C=1000))
# etc.fit(x_train, y_train)
# Evaluate().print_evaluation(etc, x_train, y_train, qid_train, k_rank)
# Evaluate().print_evaluation(etc, x_test, y_test, qid_test, k_rank)
# Evaluate().print_evaluation(etc, x_valid, y_valid, qid_valid, k_rank)
#
# yp = rfr.predict(x_valid)
# for i, j, k in zip(qid_valid, y_valid, yp):
#     print(i, j, k)
| training/train.py | 4,330 | Define Processor Preprocess Data validation data train data utils.clean_save_data(data_train_path, data_test_path, data_valid_path, required_columns, clean_data_path) Load Datalen(train_test.y.unique()) Train/Test Split Define Model model = md.RNN(x_train.shape[1], output_dim, hidden2, 2) model = md.Model1(x_train.shape[1], hidden1, hidden2, hidden3, output_dim) model = md.Model2(output_dim) Parameters Data Loader NN Model train_loop(model, epochs, optimizer, criterion, train_loader, test_loader, valid_loader, k_rank, printing_gap, saved_model_device, model_path, device, PIK_plot_data, scheduler) Regressor Model rfr = RandomForestRegressor(n_estimators=200, min_samples_split=5, random_state=1, n_jobs=-1) rfr.fit(x_train, y_train) Evaluate().print_evaluation(rfr, x_train, y_train, qid_train, k_rank) Evaluate().print_evaluation(rfr, x_test, y_test, qid_test, k_rank) Evaluate().print_evaluation(rfr, x_valid, y_valid, qid_valid, k_rank) Evaluate().save_model(rfr, reg_model_path) SVM Model Classifier Model etc = ClassificationRanker(LogisticRegression(C=1000)) etc.fit(x_train, y_train) Evaluate().print_evaluation(etc, x_train, y_train, qid_train, k_rank) Evaluate().print_evaluation(etc, x_test, y_test, qid_test, k_rank) Evaluate().print_evaluation(etc, x_valid, y_valid, qid_valid, k_rank) yp = rfr.predict(x_valid) for i, j, k in zip(qid_valid, y_valid, yp): print(i, j, k) | 1,404 | en | 0.351123 |
import argparse
import configparser
import pexpect
import re
import os
from ipykernel.kernelbase import Kernel
from . import __version__
""" Macaulay2 Jupyter Kernel
"""
class M2Config:
    """Kernel configuration backed by argparse defaults plus a config file overlay."""

    def __init__(self, execpath, configpath=os.getenv('M2JK_CONFIG')):
        """Build the default argument set, then overlay the [magic] section of *configpath*."""
        parser = argparse.ArgumentParser(usage=argparse.SUPPRESS)
        config = configparser.ConfigParser(allow_no_value=True)

        parser.add_argument('--timeout', type=int, default=2)
        parser.add_argument('--timeout_startup', type=int, default=5)
        parser.add_argument('--mode', choices=['default', 'original', 'texmacs', 'pretty'],
                            default='default')
        # parser.add_argument('--debug', default=False,
        #     type=lambda x: True if x.lower() in ['1','true','on'] else False)
        parser.add_argument('--theme', choices=['default', 'emacs'], default='default')
        # execpath is now mutable, but modifying it is no-op. fix this
        parser.add_argument('--execpath', default=execpath)
        parser.add_argument('--version', action='store_const', const=__version__, default=__version__)
        parser.add_argument('--configpath', action='store_const', const=configpath, default=configpath)
        parser.add_argument('--config')

        args = parser.parse_args('')
        if configpath:
            config.read(configpath)
            line = ' '.join(['--{} {}'.format(key, val) for key, val in config.items('magic')])
            args = parser.parse_args(line.split(), args)

        self.parser = parser
        self.config = config
        self.args = args

    def read(self, line):
        """Parse a single ``key = value`` magic line and update ``self.args``.

        Returns a (key, value, message) triple; key and value are None when
        the line cannot be parsed or applied.
        """
        self.config.remove_section('temp')
        try:
            self.config.read_string('[temp]\n'+line)
            key, val = self.config.items('temp')[0]
            if key in self.args:
                self.args = self.parser.parse_args('--{} {}'.format(key, val).split(), self.args)
                val = self.args.__dict__[key]
            msg = '[magic succeeded] {} = {}'.format(key, val)
        # Was a bare ``except:``. SystemExit must still be caught because
        # argparse calls sys.exit() on invalid values, but KeyboardInterrupt
        # should no longer be swallowed.
        except (Exception, SystemExit):
            key, val = None, None
            msg = '[magic failed]'
        return key, val, msg
class M2Interp:
""" an interpreter for Macaulay2
"""
patt_input = re.compile(br'^i(\d+)\s:')
debug = False
    def __init__(self, execpath=pexpect.which('M2'), timeout=4, configpath=None):
        """Prepare (but do not spawn) the M2 subprocess.

        Note: the ``execpath`` default is resolved once, at import time.
        """
        self.conf = M2Config(execpath, configpath)
        self.proc = None  # pexpect.spawn handle, created lazily by start()
        self.proc_command = self.conf.args.execpath
        self.proc_kwargs = {
            'args': ['--silent', '--no-debug', '-e', 'load("init.m2")'],
            'cwd': os.path.dirname(__file__) + '/assets/m2-code/',
            'timeout': timeout
        }
def start(self):
""""""
if not (self.proc is None):
return
self.proc = pexpect.spawn(self.proc_command, **self.proc_kwargs)
self.proc.delaybeforesend = None
def preprocess(self, code, usemagic, printwidth=80):
""""""
magic_lines = []
code_lines = []
for line in code.splitlines():
trimmed = line.lstrip()
if not trimmed:
continue
elif usemagic and trimmed.startswith('--%'):
key, val, msg = self.conf.read(trimmed[3:])
cmd = ''
if key == 'timeout':
self.proc.timeout = val
elif key == 'mode':
if val == 'original':
self.debug = True
else:
self.debug = False
if val == 'texmacs':
cmd = 'mode(true);'
else:
cmd = 'mode(false);'
magic_lines.append(cmd + ' << "{}";--CMD'.format(msg))
elif trimmed.startswith('--'):
continue
else:
code_lines.append(line+'--CMD')
if magic_lines or code_lines:
return 'noop(begin)--CMD\n{}\nnoop(end)--CMD--EOB'.format('\n'.join(magic_lines+code_lines))
return ''
def execute(self, code, lastonly=True, usemagic=True):
""""""
clean_code = self.preprocess(code, usemagic=usemagic)
if self.debug: print(clean_code)
if not clean_code: return []
try:
return self.repl(clean_code, lastonly=lastonly)
except Exception as e:
# kill M2 execution
# self.proc.sendcontrol('c')
# clear buffer - this is not great but works - fix it
# for line in self.proc:
# if line.endswith(b'--EOB'): break
# rethrow
raise e
def repl(self, clean_code, lastonly):
""" REPL
If `self.debug==True` then result is the raw list of lines of bytes,
otherwise, it is a list of (lineNumber, stdoutLines, valueLines, typeLines),
where again the last 3 entries are lists of lines of bytes.
"""
self.proc.sendline(clean_code)
EOT = False
debug_lines = []
nodes = []
node = ()
linenumber = None
state = None
# make sure you are not reading an echo!
# this is important! echo occurs often especially when using M2Interp.execute() directly
# https://pexpect.readthedocs.io/en/stable/commonissues.html#timing-issue-with-send-and-sendline
for echoline in self.proc:
if echoline[:1] == b'i' and echoline.endswith(b'noop(begin)--CMD\r\n'):
break
while not EOT:
try:
for testline in self.proc:
line = testline[:-2]
if self.debug: print(line)
break
except pexpect.TIMEOUT:
self.proc.sendcontrol('c')
self.proc.read(1) # this is VERY IMPORTANT!
if node:
node[1].append('\r\no{} = [KERNEL ENFORCED TIMEOUT]'.format(linenumber).encode())
nodes.append(node)
return debug_lines if self.debug else nodes
if line.endswith(b'--EOB'):
EOT = True
if self.debug:
debug_lines.append(line)
continue
if line.endswith(b'--CMD'):
newinput = self.patt_input.match(line)
if newinput:
if node:
if lastonly:
nodes.append((node[0],node[1],[],[]))
else:
nodes.append(node)
linenumber = int(newinput.groups()[0])
node = (linenumber,[],[],[])
state = 'CMD'
elif line.endswith(b'--VAL'):
state = 'VAL'
elif line.endswith(b'--CLS'):
state = 'CLS'
else: # inside one of the states
if state=='CMD': # stdout
node[1].append(line)
elif state=='VAL':
node[2].append(line)
elif state=='CLS':
node[3].append(line)
# trim the empty trailing line coming from next input line
if not node:
pass
elif node[2]:
nodes.append((node[0],node[1],node[2],node[3][:-1]))
else:
nodes.append((node[0],node[1][:-1],[],[]))
return debug_lines if self.debug else nodes
class M2Kernel(Kernel):
    """ the M2 kernel for Jupyter
    """
    implementation = 'macaulay2_jupyter_kernel'
    implementation_version = __version__
    language = 'Macaulay2'
    language_version = '1.13.0.1' # "defining implementation" version
    language_info = {
        'name': 'Macaulay2',
        'mimetype': 'text/x-macaulay2',
        'file_extension': '.m2',
        'codemirror_mode': 'macaulay2',
        # 'pigments_lexer': None,
    }
    banner = 'Jupyter Kernel for Macaulay2 (v{})'.format(implementation_version)
    help_links = [{
        'text': 'M2JK Demo',
        'url': 'https://nbviewer.jupyter.org/github/radoslavraynov/Macaulay2-Jupyter-Kernel/blob/master/demo/demo.ipynb'
    }]

    def __init__(self, *args, **kwargs):
        """ kernel init - calls __init__ on the parent and sets up the M2Interp object
        """
        super().__init__(*args, **kwargs)
        self.interp = M2Interp(configpath=os.environ.get('M2JK_CONFIG'))
        self.interp.start()

    def process_output(self, nodes):
        """Convert the interpreter's output into (display_data, stream_text).

        The result shape depends on the configured output mode: 'original'
        returns the raw transcript as the stream; 'default' flattens every
        node part to plain text; 'texmacs' wraps the last value in HTML math
        markup; 'pretty' renders the last value and its class as <pre> blocks.
        In debug mode the nodes are passed through unchanged.
        """
        mode = self.interp.conf.args.mode
        if mode == 'original':
            # In this mode `nodes` is the raw list of byte lines from repl().
            clean_lines = []
            for ln in nodes:
                if ln.endswith(b'--EOB') or ln.endswith(b'--VAL') or ln.endswith(b'--CLS'):
                    pass
                elif ln.endswith(b'--CMD'):
                    clean_lines.append(ln[:-5])
                else:
                    clean_lines.append(ln)
            return None, b'\n'.join(clean_lines).decode()
        elif self.interp.debug:
            return nodes
        elif mode == 'default':
            lines = [ln.decode() for node in nodes for part in node[1:] for ln in part]
            return None, '\n'.join(lines)
        stdout = '\n'.join([ln.decode() for node in nodes for ln in node[1]])
        if mode == 'texmacs':
            value_lines = nodes[-1][2]
            if value_lines:
                dirty = '\n'.join([ln.decode() for ln in value_lines])
                # NOTE(review): assumes a fixed 6-character TeXmacs prefix and a
                # missing closing tag in the raw value — confirm against init.m2.
                clean = dirty[6:] + '\n</math>'
                return {'text/html': clean}, stdout
        elif mode == 'pretty':
            # Strip the "oNN = " margin ("o" + digits + " = " is len+4 chars).
            margin = len(str(nodes[-1][0]))+4
            textval = '\n'.join([ln[margin:].decode() for ln in nodes[-1][2]])
            textcls = '\n'.join([ln[margin:].decode() for ln in nodes[-1][3]])
            html = '<pre>{}</pre><pre style="color: gray">{}</pre>'.format(textval, textcls)
            return {'text/html': html}, stdout
        return None, stdout

    def send_stream(self, text, stderr=False):
        """ enqueues a stdout or stderr message for the given cell
        """
        stdfile = 'stderr' if stderr else 'stdout'
        content = {'name': stdfile, 'text': text+'\n'}
        self.send_response(self.iopub_socket, 'stream', content)

    def mock_execute(self, code):
        """Execute code and return the processed output; used for testing."""
        output_lines = self.interp.execute(code, lastonly=False)
        return self.process_output(output_lines)

    def do_execute(self, code, silent, store_history=True, user_expressions=None, allow_stdin=False):
        """ kernel entry point for the execution of each cell
        """
        try:
            output_lines = self.interp.execute(code)
        except Exception as e:
            # Surface interpreter failures on stderr instead of crashing the kernel.
            output_lines = []
            self.send_stream(str(e), True)
        xcount = None
        if not silent:
            if not output_lines:
                return {'status': 'ok',
                        'execution_count': None,
                        'payload': [],
                        'user_expressions': {}}
            data, stream = self.process_output(output_lines)
            xcount = output_lines[-1][0]
            if stream:
                stdout_content = {'name': 'stdout', 'text': stream}
                self.send_response(self.iopub_socket, 'stream', stdout_content)
            if data:
                execute_content = {'data': data, 'execution_count': xcount}
                self.send_response(self.iopub_socket, 'execute_result', execute_content)
        return {'status': 'ok',
                'execution_count': xcount,
                'payload': [],
                'user_expressions': {}}
| m2_kernel/kernel.py | 11,722 | an interpreter for Macaulay2
the M2 kernel for Jupyter
kernel init - calls __init__ on the parent and sets up the M2Interp object
kernel entry point for the execution of each cell
REPL
If `self.debug==True` then result is the raw list of lines of bytes,
otherwise, it is a list of (lineNumber, stdoutLines, valueLines, typeLines),
where again the last 3 entries are lists of lines of bytes.
enqueues a stdout or stderr message for the given cell
parser.add_argument('--debug', default=False, type=lambda x: True if x.lower() in ['1','true','on'] else False) execpath is now mutable, but modifying it is no-op. fix this kill M2 execution self.proc.sendcontrol('c') clear buffer - this is not great but works - fix it for line in self.proc: if line.endswith(b'--EOB'): break rethrow make sure you are not reading an echo! this is important! echo occurs often especially when using M2Interp.execute() directly https://pexpect.readthedocs.io/en/stable/commonissues.htmltiming-issue-with-send-and-sendline this is VERY IMPORTANT! inside one of the states stdout trim the empty trailing line coming from next input line "defining implementation" version 'pigments_lexer': None, | 1,241 | en | 0.629152 |
#!/usr/bin/env python3
import argparse
import common
import functools
import multiprocessing
import os
import os.path
import pathlib
import re
import subprocess
import stat
import sys
import traceback
import shutil
import paths
EXCLUDED_PREFIXES = ("./generated/", "./thirdparty/", "./build", "./.git/", "./bazel-", "./.cache",
                     "./source/extensions/extensions_build_config.bzl",
                     "./bazel/toolchains/configs/", "./tools/testdata/check_format/",
                     "./tools/pyformat/")
SUFFIXES = ("BUILD", "WORKSPACE", ".bzl", ".cc", ".h", ".java", ".m", ".md", ".mm", ".proto",
            ".rst")
DOCS_SUFFIX = (".md", ".rst")
# NOTE: single-element containers below are written as real one-element tuples
# (with the trailing comma). The previous bare parenthesized strings made
# `x in WHITELIST` a *substring* test rather than an exact-path match, so e.g.
# any fragment of the whitelisted path would be accepted.
PROTO_SUFFIX = (".proto",)

# Files in these paths can make reference to protobuf stuff directly
GOOGLE_PROTOBUF_WHITELIST = ("ci/prebuilt", "source/common/protobuf", "api/test")
REPOSITORIES_BZL = "bazel/repositories.bzl"

# Files matching these exact names can reference real-world time. These include the class
# definitions for real-world time, the construction of them in main(), and perf annotation.
# For now it includes the validation server but that really should be injected too.
REAL_TIME_WHITELIST = ("./source/common/common/utility.h",
                       "./source/extensions/filters/http/common/aws/utility.cc",
                       "./source/common/event/real_time_system.cc",
                       "./source/common/event/real_time_system.h", "./source/exe/main_common.cc",
                       "./source/exe/main_common.h", "./source/server/config_validation/server.cc",
                       "./source/common/common/perf_annotation.h",
                       "./test/test_common/simulated_time_system.cc",
                       "./test/test_common/simulated_time_system.h",
                       "./test/test_common/test_time.cc", "./test/test_common/test_time.h",
                       "./test/test_common/utility.cc", "./test/test_common/utility.h",
                       "./test/integration/integration.h")

# Files in these paths can use MessageLite::SerializeAsString
SERIALIZE_AS_STRING_WHITELIST = (
    "./source/common/config/version_converter.cc",
    "./source/extensions/filters/http/grpc_json_transcoder/json_transcoder_filter.cc",
    "./test/common/protobuf/utility_test.cc",
    "./test/common/grpc/codec_test.cc",
    "./test/common/grpc/codec_fuzz_test.cc",
)

# Files in these paths can use Protobuf::util::JsonStringToMessage
JSON_STRING_TO_MESSAGE_WHITELIST = ("./source/common/protobuf/utility.cc",)

# Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing
# ones were grandfathered as part of PR #8484 for backwards compatibility.
HISTOGRAM_WITH_SI_SUFFIX_WHITELIST = ("downstream_cx_length_ms", "downstream_cx_length_ms",
                                      "initialization_time_ms", "loop_duration_us", "poll_delay_us",
                                      "request_time_ms", "upstream_cx_connect_ms",
                                      "upstream_cx_length_ms")

# Files in these paths can use std::regex
STD_REGEX_WHITELIST = ("./source/common/common/utility.cc", "./source/common/common/regex.h",
                       "./source/common/common/regex.cc",
                       "./source/common/stats/tag_extractor_impl.h",
                       "./source/common/stats/tag_extractor_impl.cc",
                       "./source/common/access_log/access_log_formatter.cc",
                       "./source/extensions/filters/http/squash/squash_filter.h",
                       "./source/extensions/filters/http/squash/squash_filter.cc",
                       "./source/server/http/admin.h", "./source/server/http/admin.cc",
                       "./tools/clang_tools/api_booster/main.cc",
                       "./tools/clang_tools/api_booster/proto_cxx_utils.cc")

# Only one C++ file should instantiate grpc_init
GRPC_INIT_WHITELIST = ("./source/common/grpc/google_grpc_context.cc",)
# External tool locations. clang-format may be overridden via the CLANG_FORMAT
# environment variable; buildifier/buildozer paths come from the sibling
# `paths` helper module.
CLANG_FORMAT_PATH = os.getenv("CLANG_FORMAT", "clang-format-9")
BUILDIFIER_PATH = paths.getBuildifier()
BUILDOZER_PATH = paths.getBuildozer()
# Helper scripts that live in the same directory as this script.
ENVOY_BUILD_FIXER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])),
                                      "envoy_build_fixer.py")
HEADER_ORDER_PATH = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "header_order.py")
# Canonical ordering of first-party #include subdirectories, shared via `common`.
SUBDIR_SET = set(common.includeDirOrder())
INCLUDE_ANGLE = "#include <"
INCLUDE_ANGLE_LEN = len(INCLUDE_ANGLE)
# Captures the package name from a `package foo.bar;` statement in a .proto file.
PROTO_PACKAGE_REGEX = re.compile(r"^package (\S+);\n*", re.MULTILINE)
# Matches lines that quote an "x-envoy-..." header literal directly.
X_ENVOY_USED_DIRECTLY_REGEX = re.compile(r'.*\"x-envoy-.*\".*')
# yapf: disable
# Substitution table applied by fixSourceLine(); iteration order determines
# the order replacements (and checkSourceLine error messages) are produced.
PROTOBUF_TYPE_ERRORS = {
    # Well-known types should be referenced from the ProtobufWkt namespace.
    "Protobuf::Any": "ProtobufWkt::Any",
    "Protobuf::Empty": "ProtobufWkt::Empty",
    "Protobuf::ListValue": "ProtobufWkt::ListValue",
    "Protobuf::NULL_VALUE": "ProtobufWkt::NULL_VALUE",
    "Protobuf::StringValue": "ProtobufWkt::StringValue",
    "Protobuf::Struct": "ProtobufWkt::Struct",
    "Protobuf::Value": "ProtobufWkt::Value",

    # Other common mis-namespacing of protobuf types.
    "ProtobufWkt::Map": "Protobuf::Map",
    "ProtobufWkt::MapPair": "Protobuf::MapPair",
    "ProtobufUtil::MessageDifferencer": "Protobuf::util::MessageDifferencer"
}
# Spellings to prefer from the standard library over their absl equivalents.
LIBCXX_REPLACEMENTS = {
    "absl::make_unique<": "std::make_unique<",
}

# NOTE(review): presumably extension directories without a dedicated owner
# entry — verify against how main() consumes this set before relying on it.
UNOWNED_EXTENSIONS = {
  "extensions/filters/http/ratelimit",
  "extensions/filters/http/buffer",
  "extensions/filters/http/rbac",
  "extensions/filters/http/ip_tagging",
  "extensions/filters/http/tap",
  "extensions/filters/http/health_check",
  "extensions/filters/http/cors",
  "extensions/filters/http/ext_authz",
  "extensions/filters/http/dynamo",
  "extensions/filters/http/lua",
  "extensions/filters/http/common",
  "extensions/filters/common",
  "extensions/filters/common/ratelimit",
  "extensions/filters/common/rbac",
  "extensions/filters/common/lua",
  "extensions/filters/listener/original_dst",
  "extensions/filters/listener/proxy_protocol",
  "extensions/stat_sinks/statsd",
  "extensions/stat_sinks/common",
  "extensions/stat_sinks/common/statsd",
  "extensions/health_checkers/redis",
  "extensions/access_loggers/grpc",
  "extensions/access_loggers/file",
  "extensions/common/tap",
  "extensions/transport_sockets/raw_buffer",
  "extensions/transport_sockets/tap",
  "extensions/tracers/zipkin",
  "extensions/tracers/dynamic_ot",
  "extensions/tracers/opencensus",
  "extensions/tracers/lightstep",
  "extensions/tracers/common",
  "extensions/tracers/common/ot",
  "extensions/retry/host/previous_hosts",
  "extensions/filters/network/ratelimit",
  "extensions/filters/network/client_ssl_auth",
  "extensions/filters/network/rbac",
  "extensions/filters/network/tcp_proxy",
  "extensions/filters/network/echo",
  "extensions/filters/network/ext_authz",
  "extensions/filters/network/redis_proxy",
  "extensions/filters/network/kafka",
  "extensions/filters/network/kafka/protocol",
  "extensions/filters/network/kafka/serialization",
  "extensions/filters/network/mongo_proxy",
  "extensions/filters/network/common",
  "extensions/filters/network/common/redis",
}
# yapf: enable
# Map a line transformation function across each line of a file.
def replaceLines(path, line_xform):
  """Rewrite *path* in place, applying *line_xform* to every '\\n'-separated line."""
  # We used to use fileinput in the older Python 2.7 script, but it can't do
  # inplace mode together with UTF-8 under Python 3, hence the manual rewrite.
  transformed = [line_xform(original) for original in readLines(path)]
  pathlib.Path(path).write_text('\n'.join(transformed), encoding='utf-8')
# Obtain all the lines in a given file.
def readLines(path):
  """Return the file's text split on '\\n' (a trailing newline yields a final '')."""
  return readFile(path).split('\n')


# Read a UTF-8 encoded file as a str.
def readFile(path):
  """Read *path* as UTF-8 text and return it as a str."""
  with open(path, encoding='utf-8') as source:
    return source.read()
# lookPath searches for the given executable in all directories in PATH
# environment variable. If it cannot be found, empty string is returned.
def lookPath(executable):
  """Return the first PATH entry that contains *executable*, or ''.

  Only existence is checked here; execute permission is verified separately
  by executableByOthers().
  """
  candidates = (os.path.join(directory, executable)
                for directory in os.environ["PATH"].split(os.pathsep))
  return next((candidate for candidate in candidates if os.path.exists(candidate)), "")
# pathExists checks whether the given path exists. This function assumes that
# the path is absolute and evaluates environment variables.
def pathExists(executable):
  """True when the (environment-variable-expanded) path exists on disk."""
  expanded = os.path.expandvars(executable)
  return os.path.exists(expanded)
# executableByOthers checks whether the given path has execute permission for
# others.
def executableByOthers(executable):
  """True when the file grants execute permission to 'others' (the o+x bit)."""
  mode = os.stat(os.path.expandvars(executable)).st_mode
  return (mode & stat.S_IXOTH) != 0
# Check whether all needed external tools (clang-format, buildifier, buildozer) are
# available.
def checkTools():
  """Verify the external tools exist and are executable by others.

  Returns a list of human-readable error messages; empty when clang-format,
  buildifier and buildozer are all present and usable.
  """
  error_messages = []

  clang_format_abs_path = lookPath(CLANG_FORMAT_PATH)
  if clang_format_abs_path:
    if not executableByOthers(clang_format_abs_path):
      error_messages.append("command {} exists, but cannot be executed by other "
                            "users".format(CLANG_FORMAT_PATH))
  else:
    error_messages.append(
        "Command {} not found. If you have clang-format in version 8.x.x "
        "installed, but the binary name is different or it's not available in "
        "PATH, please use CLANG_FORMAT environment variable to specify the path. "
        "Examples:\n"
        "    export CLANG_FORMAT=clang-format-9.0.0\n"
        "    export CLANG_FORMAT=/opt/bin/clang-format-9\n"
        "    export CLANG_FORMAT=/usr/local/opt/llvm@9/bin/clang-format".format(CLANG_FORMAT_PATH))

  def checkBazelTool(name, path, var):
    # Accept the tool either on PATH or at an explicit existing path; in both
    # cases it must be executable by others.
    bazel_tool_abs_path = lookPath(path)
    if bazel_tool_abs_path:
      if not executableByOthers(bazel_tool_abs_path):
        error_messages.append("command {} exists, but cannot be executed by other "
                              "users".format(path))
    elif pathExists(path):
      if not executableByOthers(path):
        error_messages.append("command {} exists, but cannot be executed by other "
                              "users".format(path))
    else:
      # BUG FIX: the original message always said "buildifier" here, even when
      # the missing tool was buildozer; use the tool's actual name throughout.
      error_messages.append(
          "Command {} not found. If you have {} installed, but the binary "
          "name is different or it's not available in $GOPATH/bin, please use "
          "{} environment variable to specify the path. Example:\n"
          "    export {}=/opt/bin/{}\n"
          "If you don't have {} installed, you can install it by:\n"
          "    go get -u github.com/bazelbuild/buildtools/{}".format(
              path, name, var, var, name, name, name))

  checkBazelTool('buildifier', BUILDIFIER_PATH, 'BUILDIFIER_BIN')
  checkBazelTool('buildozer', BUILDOZER_PATH, 'BUILDOZER_BIN')

  return error_messages
def checkNamespace(file_path):
  """Check that the file opens the expected top-level namespace.

  Returns a single-element error list when neither the namespace nor its
  NOLINT escape hatch is present; [] otherwise (including excluded paths).
  """
  if any(file_path.startswith(excluded) for excluded in namespace_check_excluded_paths):
    return []
  nolint = "NOLINT(namespace-%s)" % namespace_check.lower()
  text = readFile(file_path)
  if nolint in text:
    return []
  if re.search("^\s*namespace\s+%s\s*{" % namespace_check, text, re.MULTILINE):
    return []
  return ["Unable to find %s namespace or %s for file: %s" % (namespace_check, nolint, file_path)]
def packageNameForProto(file_path):
  """Return ``[package_name, error_list]`` for a .proto file.

  ``package_name`` is None (and the error list non-empty) when no
  ``package`` statement could be found.
  """
  match = PROTO_PACKAGE_REGEX.search(readFile(file_path))
  package_name = match.group(1) if match is not None and len(match.groups()) == 1 else None
  if package_name is not None:
    return [package_name, []]
  return [package_name, ["Unable to find package name for proto file: %s" % file_path]]
# To avoid breaking the Lyft import, we just check for path inclusion here.
def whitelistedForProtobufDeps(file_path):
  """True if the file may reference protobuf directly."""
  if file_path.endswith(PROTO_SUFFIX) or file_path.endswith(REPOSITORIES_BZL):
    return True
  return any(segment in file_path for segment in GOOGLE_PROTOBUF_WHITELIST)


# Real-world time sources should not be instantiated in the source, except for a few
# specific cases. They should be passed down from where they are instantied to where
# they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager.
def whitelistedForRealTime(file_path):
  """True if the file may reference real-world time sources."""
  return file_path.endswith(".md") or file_path in REAL_TIME_WHITELIST


def whitelistedForSerializeAsString(file_path):
  """True if the file may call MessageLite::SerializeAsString."""
  return file_path in SERIALIZE_AS_STRING_WHITELIST


def whitelistedForJsonStringToMessage(file_path):
  """True if the file may call Protobuf::util::JsonStringToMessage."""
  return file_path in JSON_STRING_TO_MESSAGE_WHITELIST


def whitelistedForHistogramSiSuffix(name):
  """True if the histogram name is grandfathered to keep its unit suffix."""
  return name in HISTOGRAM_WITH_SI_SUFFIX_WHITELIST


def whitelistedForStdRegex(file_path):
  """True if the file may use std::regex."""
  if file_path.startswith("./test"):
    return True
  if file_path in STD_REGEX_WHITELIST:
    return True
  return file_path.endswith(DOCS_SUFFIX)


def whitelistedForGrpcInit(file_path):
  """True if the file may call grpc_init()/grpc_shutdown() directly."""
  return file_path in GRPC_INIT_WHITELIST


def whitelistedForUnpackTo(file_path):
  """True if the file may call UnpackTo() directly."""
  if file_path.startswith("./test"):
    return True
  return file_path in ("./source/common/protobuf/utility.cc",
                       "./source/common/protobuf/utility.h")
def findSubstringAndReturnError(pattern, file_path, error_message):
  """Return error messages listing each line of the file containing *pattern*.

  The first entry is the file-level message; one "  path:line" entry follows
  per matching line. Returns [] when the pattern does not occur.
  """
  text = readFile(file_path)
  if pattern not in text:
    return []
  errors = [file_path + ": " + error_message]
  for number, current in enumerate(text.splitlines(), start=1):
    if pattern in current:
      errors.append("  %s:%s" % (file_path, number))
  return errors
def errorIfNoSubstringFound(pattern, file_path, error_message):
  """Return a single-element error list unless *pattern* occurs in the file."""
  if pattern in readFile(file_path):
    return []
  return [file_path + ": " + error_message]
def isApiFile(file_path):
  # True for files under the API tree or its shadow copy. `args` is the
  # module-global argparse namespace populated in main() (outside this view).
  return file_path.startswith(args.api_prefix) or file_path.startswith(args.api_shadow_prefix)
def isBuildFile(file_path):
  """True for Bazel build files: BUILD, BUILD.bazel, or *.BUILD."""
  base = os.path.basename(file_path)
  return base in {"BUILD", "BUILD.bazel"} or base.endswith(".BUILD")
def isExternalBuildFile(file_path):
  """True for BUILD files vendored under bazel/external or tools/clang_tools."""
  external_roots = ("./bazel/external/", "./tools/clang_tools")
  return isBuildFile(file_path) and file_path.startswith(external_roots)


def isSkylarkFile(file_path):
  """True for Starlark (.bzl) files."""
  return file_path.endswith(".bzl")


def isWorkspaceFile(file_path):
  """True for Bazel WORKSPACE files."""
  return os.path.basename(file_path) == "WORKSPACE"


def isBuildFixerExcludedFile(file_path):
  """True when the path is excluded from envoy_build_fixer processing."""
  return any(file_path.startswith(excluded) for excluded in build_fixer_check_excluded_paths)
def hasInvalidAngleBracketDirectory(line):
  """True when an angle-bracket #include targets a first-party subdirectory."""
  if not line.startswith(INCLUDE_ANGLE):
    return False
  remainder = line[INCLUDE_ANGLE_LEN:]
  first_dir, separator, _ = remainder.partition("/")
  # Without a '/' it cannot be a first-party path; otherwise check the root dir.
  return bool(separator) and first_dir in SUBDIR_SET
# A well-formed current-release note: "* area: lowercase description" (a
# backtick or colon may open the description). Raw strings are used because
# "\*" and "\-" are invalid escape sequences in plain str literals
# (DeprecationWarning today, a SyntaxError in future Python versions).
VERSION_HISTORY_NEW_LINE_REGEX = re.compile(r"\* [a-z \-_]*: [a-z:`]")
# A release separator row in version_history.rst: five or more '=' characters.
VERSION_HISTORY_NEW_RELEASE_REGEX = re.compile(r"^====[=]+$")
def checkCurrentReleaseNotes(file_path, error_messages):
  """Validate version-history entries for the current (topmost) release only.

  Appends to *error_messages* in place; older releases use a different
  format, so scanning stops at the second release separator.
  """
  in_current_release = False
  for line_number, line in enumerate(readLines(file_path)):
    if VERSION_HISTORY_NEW_RELEASE_REGEX.match(line):
      if in_current_release:
        # A second separator means we have passed the current release section.
        break
      in_current_release = True
    if line.startswith("*") and not VERSION_HISTORY_NEW_LINE_REGEX.match(line):
      error_messages.append("%s:%d: %s" % (
          file_path, line_number + 1,
          "Version history line malformed. "
          "Does not match VERSION_HISTORY_NEW_LINE_REGEX in check_format.py\n %s" % line))
def checkFileContents(file_path, checker):
  """Run *checker* on every line of the file and return collected errors.

  *checker* is called as checker(line, file_path, reportError); reported
  messages are prefixed with "path:line: ".
  """
  error_messages = []
  if file_path.endswith("version_history.rst"):
    # Version file checking has enough special-cased logic to merit its own
    # routine; it only validates entries for the current release.
    checkCurrentReleaseNotes(file_path, error_messages)

  def makeReporter(number):
    return lambda message: error_messages.append("%s:%d: %s" % (file_path, number + 1, message))

  for line_number, line in enumerate(readLines(file_path)):
    checker(line, file_path, makeReporter(line_number))
  return error_messages
# A '.' followed by one or more spaces; used to collapse runs of spaces.
DOT_MULTI_SPACE_REGEX = re.compile(r"\. +")


def fixSourceLine(line):
  """Return *line* with all automatically-fixable style issues repaired."""
  # Strip double space after '.' This may prove overenthusiastic and need to
  # be restricted to comments and metadata files but works for now.
  line = DOT_MULTI_SPACE_REGEX.sub(". ", line)

  if hasInvalidAngleBracketDirectory(line):
    # First-party includes must use quotes rather than angle brackets.
    line = line.replace("<", '"').replace(">", '"')

  # Fix incorrect protobuf namespace references.
  for invalid, valid in PROTOBUF_TYPE_ERRORS.items():
    line = line.replace(invalid, valid)
  # Use recommended cpp stdlib spellings.
  for invalid, valid in LIBCXX_REPLACEMENTS.items():
    line = line.replace(invalid, valid)
  return line
# We want to look for a call to condvar.waitFor, but there's no strong pattern
# to the variable name of the condvar. If we just look for ".waitFor" we'll also
# pick up time_system_.waitFor(...), and we don't want to return true for that
# pattern. But in that case there is a strong pattern of using time_system in
# various spellings as the variable name.
def hasCondVarWaitFor(line):
  """Heuristically detect a condition-variable waitFor() call on *line*."""
  call_index = line.find(".waitFor(")
  if call_index == -1:
    return False
  receiver = line[:call_index]
  return not receiver.endswith(("time_system", "timeSystem()", "time_system_"))
# Determines whether the filename is either in the specified subdirectory, or
# at the top level. We consider files in the top level for the benefit of
# the check_format testcases in tools/testdata/check_format.
def isInSubdir(filename, *subdirs):
  """True when *filename* sits under one of *subdirs*, or at the top level."""
  # Skip this check for check_format's unit-tests.
  if filename.count("/") <= 1:
    return True
  return any(filename.startswith("./" + subdir + "/") for subdir in subdirs)
def checkSourceLine(line, file_path, reportError):
  """Run every per-line source check on *line*, reporting via *reportError*.

  Checks fall in two groups: issues fixSourceLine() can repair automatically
  (spacing, include brackets, protobuf namespace spellings, libc++ terms),
  and hand-fix-only issues (banned APIs, real-time usage, portability).
  """
  # Check fixable errors. These may have been fixed already.
  if line.find(".  ") != -1:
    reportError("over-enthusiastic spaces")
  if isInSubdir(file_path, 'source', 'include') and X_ENVOY_USED_DIRECTLY_REGEX.match(line):
    reportError(
        "Please do not use the raw literal x-envoy in source code. See Envoy::Http::PrefixValue.")
  if hasInvalidAngleBracketDirectory(line):
    reportError("envoy includes should not have angle brackets")
  for invalid_construct, valid_construct in PROTOBUF_TYPE_ERRORS.items():
    if invalid_construct in line:
      reportError("incorrect protobuf type reference %s; "
                  "should be %s" % (invalid_construct, valid_construct))
  for invalid_construct, valid_construct in LIBCXX_REPLACEMENTS.items():
    if invalid_construct in line:
      reportError("term %s should be replaced with standard library term %s" %
                  (invalid_construct, valid_construct))
  # Do not include the virtual_includes headers.
  if re.search("#include.*/_virtual_includes/", line):
    reportError("Don't include the virtual includes headers.")

  # Some errors cannot be fixed automatically, and actionable, consistent,
  # navigable messages should be emitted to make it easy to find and fix
  # the errors by hand.
  if not whitelistedForProtobufDeps(file_path):
    if '"google/protobuf' in line or "google::protobuf" in line:
      reportError("unexpected direct dependency on google.protobuf, use "
                  "the definitions in common/protobuf/protobuf.h instead.")
  if line.startswith("#include <mutex>") or line.startswith("#include <condition_variable"):
    # We don't check here for std::mutex because that may legitimately show up in
    # comments, for example this one.
    reportError("Don't use <mutex> or <condition_variable*>, switch to "
                "Thread::MutexBasicLockable in source/common/common/thread.h")
  if line.startswith("#include <shared_mutex>"):
    # We don't check here for std::shared_timed_mutex because that may
    # legitimately show up in comments, for example this one.
    reportError("Don't use <shared_mutex>, use absl::Mutex for reader/writer locks.")
  if not whitelistedForRealTime(file_path) and not "NO_CHECK_FORMAT(real_time)" in line:
    if "RealTimeSource" in line or \
        ("RealTimeSystem" in line and not "TestRealTimeSystem" in line) or \
        "std::chrono::system_clock::now" in line or "std::chrono::steady_clock::now" in line or \
        "std::this_thread::sleep_for" in line or hasCondVarWaitFor(line):
      reportError("Don't reference real-world time sources from production code; use injection")
  if not whitelistedForUnpackTo(file_path):
    if "UnpackTo" in line:
      reportError("Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
  # Check that we use the absl::Time library
  if "std::get_time" in line:
    if "test/" in file_path:
      reportError("Don't use std::get_time; use TestUtility::parseTime in tests")
    else:
      reportError("Don't use std::get_time; use the injectable time system")
  if "std::put_time" in line:
    reportError("Don't use std::put_time; use absl::Time equivalent instead")
  if "gmtime" in line:
    reportError("Don't use gmtime; use absl::Time equivalent instead")
  if "mktime" in line:
    reportError("Don't use mktime; use absl::Time equivalent instead")
  if "localtime" in line:
    reportError("Don't use localtime; use absl::Time equivalent instead")
  if "strftime" in line:
    reportError("Don't use strftime; use absl::FormatTime instead")
  if "strptime" in line:
    reportError("Don't use strptime; use absl::FormatTime instead")
  if "std::atomic_" in line:
    # The std::atomic_* free functions are functionally equivalent to calling
    # operations on std::atomic<T> objects, so prefer to use that instead.
    reportError("Don't use free std::atomic_* functions, use std::atomic<T> members instead.")
  if "__attribute__((packed))" in line and file_path != "./include/envoy/common/platform.h":
    # __attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that
    # can be used instead
    reportError("Don't use __attribute__((packed)), use the PACKED_STRUCT macro defined "
                "in include/envoy/common/platform.h instead")
  if re.search("\{\s*\.\w+\s*\=", line):
    # Designated initializers are not part of the C++14 standard and are not supported
    # by MSVC
    reportError("Don't use designated initializers in struct initialization, "
                "they are not part of C++14")
  if " ?: " in line:
    # The ?: operator is non-standard, it is a GCC extension
    reportError("Don't use the '?:' operator, it is a non-standard GCC extension")
  if line.startswith("using testing::Test;"):
    reportError("Don't use 'using testing::Test;, elaborate the type instead")
  if line.startswith("using testing::TestWithParams;"):
    reportError("Don't use 'using testing::Test;, elaborate the type instead")
  if not whitelistedForSerializeAsString(file_path) and "SerializeAsString" in line:
    # The MessageLite::SerializeAsString doesn't generate deterministic serialization,
    # use MessageUtil::hash instead.
    reportError(
        "Don't use MessageLite::SerializeAsString for generating deterministic serialization, use MessageUtil::hash instead."
    )
  if not whitelistedForJsonStringToMessage(file_path) and "JsonStringToMessage" in line:
    # Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing
    # behavior.
    reportError("Don't use Protobuf::util::JsonStringToMessage, use TestUtility::loadFromJson.")

  if isInSubdir(file_path, 'source') and file_path.endswith('.cc') and \
      ('.counter(' in line or '.gauge(' in line or '.histogram(' in line):
    reportError("Don't lookup stats by name at runtime; use StatName saved during construction")

  if re.search("envoy::[a-z0-9_:]+::[A-Z][a-z]\w*_\w*_[A-Z]{2}", line):
    reportError("Don't use mangled Protobuf names for enum constants")

  hist_m = re.search("(?<=HISTOGRAM\()[a-zA-Z0-9_]+_(b|kb|mb|ns|us|ms|s)(?=,)", line)
  if hist_m and not whitelistedForHistogramSiSuffix(hist_m.group(0)):
    reportError(
        "Don't suffix histogram names with the unit symbol, "
        "it's already part of the histogram object and unit-supporting sinks can use this information natively, "
        "other sinks can add the suffix automatically on flush should they prefer to do so.")

  if not whitelistedForStdRegex(file_path) and "std::regex" in line:
    reportError("Don't use std::regex in code that handles untrusted input. Use RegexMatcher")

  if not whitelistedForGrpcInit(file_path):
    grpc_init_or_shutdown = line.find("grpc_init()")
    grpc_shutdown = line.find("grpc_shutdown()")
    # Prefer whichever of the two calls appears first on the line.
    if grpc_init_or_shutdown == -1 or (grpc_shutdown != -1 and
                                       grpc_shutdown < grpc_init_or_shutdown):
      grpc_init_or_shutdown = grpc_shutdown
    if grpc_init_or_shutdown != -1:
      comment = line.find("// ")
      # Only flag calls that are not inside a trailing // comment.
      if comment == -1 or comment > grpc_init_or_shutdown:
        reportError("Don't call grpc_init() or grpc_shutdown() directly, instantiate " +
                    "Grpc::GoogleGrpcContext. See #8282")
def checkBuildLine(line, file_path, reportError):
  """Per-line checks for BUILD-family files; reports via *reportError*."""
  in_bazel_territory = isSkylarkFile(file_path) or file_path.startswith("./bazel/")
  if "@bazel_tools" in line and not in_bazel_territory:
    reportError("unexpected @bazel_tools reference, please indirect via a definition in //bazel")
  if '"protobuf"' in line and not whitelistedForProtobufDeps(file_path):
    reportError("unexpected direct external dependency on protobuf, use "
                "//source/common/protobuf instead.")
  if (envoy_build_rule_check and not isSkylarkFile(file_path) and not isWorkspaceFile(file_path) and
      not isExternalBuildFile(file_path) and "@envoy//" in line):
    reportError("Superfluous '@envoy//' prefix")
def fixBuildLine(file_path, line):
  """Return *line* with the redundant '@envoy//' prefix stripped where applicable."""
  rewritable = (envoy_build_rule_check and not isSkylarkFile(file_path) and
                not isWorkspaceFile(file_path) and not isExternalBuildFile(file_path))
  if rewritable:
    line = line.replace("@envoy//", "//")
  return line
def fixBuildPath(file_path):
  """Apply in-place fixes to a BUILD-family file; returns error messages."""
  replaceLines(file_path, functools.partial(fixBuildLine, file_path))

  error_messages = []
  # TODO(htuch): Add API specific BUILD fixer script.
  skip_build_fixer = (isBuildFixerExcludedFile(file_path) or isApiFile(file_path) or
                      isSkylarkFile(file_path) or isWorkspaceFile(file_path))
  if not skip_build_fixer:
    if os.system("%s %s %s" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)) != 0:
      error_messages += ["envoy_build_fixer rewrite failed for file: %s" % file_path]

  if os.system("%s -mode=fix %s" % (BUILDIFIER_PATH, file_path)) != 0:
    error_messages += ["buildifier rewrite failed for file: %s" % file_path]
  return error_messages
def checkBuildPath(file_path):
  """Run all BUILD-file checks for *file_path*; returns accumulated errors."""
  error_messages = []
  if not isBuildFixerExcludedFile(file_path) and not isApiFile(file_path) and not isSkylarkFile(
      file_path) and not isWorkspaceFile(file_path):
    fixer_command = "%s %s | diff %s -" % (ENVOY_BUILD_FIXER_PATH, file_path, file_path)
    error_messages += executeCommand(fixer_command, "envoy_build_fixer check failed", file_path)

  is_api_build_file = isBuildFile(file_path) and (
      file_path.startswith(args.api_prefix + "envoy") or
      file_path.startswith(args.api_shadow_prefix + "envoy"))
  if is_api_build_file:
    # Every API BUILD file must declare its proto package.
    has_api_proto_package = any("api_proto_package(" in line for line in readLines(file_path))
    if not has_api_proto_package:
      error_messages += ["API build file does not provide api_proto_package()"]

  buildifier_command = "%s -mode=diff %s" % (BUILDIFIER_PATH, file_path)
  error_messages += executeCommand(buildifier_command, "buildifier check failed", file_path)
  error_messages += checkFileContents(file_path, checkBuildLine)
  return error_messages
def fixSourcePath(file_path):
  """Rewrite a source file in place: fix per-line issues, reorder headers and
  clang-format (for non-docs, non-proto files), and validate the proto package
  name for API protos. Returns a list of error-message strings."""
  replaceLines(file_path, fixSourceLine)
  error_messages = []
  if not file_path.endswith(DOCS_SUFFIX):
    if not file_path.endswith(PROTO_SUFFIX):
      error_messages += fixHeaderOrder(file_path)
    error_messages += clangFormat(file_path)
  if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path):
    # packageNameForProto returns (name, errors); name is None on failure.
    package_name, error_message = packageNameForProto(file_path)
    if package_name is None:
      error_messages += error_message
  return error_messages
def checkSourcePath(file_path):
  """Check a source file without modifying it.

  Applies per-line checks, the namespace check, header_order.py and
  clang-format diff checks (for non-docs, non-proto files), and validates the
  proto package name for API protos. Returns a list of error-message strings."""
  error_messages = checkFileContents(file_path, checkSourceLine)
  if not file_path.endswith(DOCS_SUFFIX):
    if not file_path.endswith(PROTO_SUFFIX):
      error_messages += checkNamespace(file_path)
      # Diff each tool's output against the file itself; any diff is an error.
      command = ("%s --include_dir_order %s --path %s | diff %s -" %
                 (HEADER_ORDER_PATH, include_dir_order, file_path, file_path))
      error_messages += executeCommand(command, "header_order.py check failed", file_path)
    command = ("%s %s | diff %s -" % (CLANG_FORMAT_PATH, file_path, file_path))
    error_messages += executeCommand(command, "clang-format check failed", file_path)
  if file_path.endswith(PROTO_SUFFIX) and isApiFile(file_path):
    package_name, error_message = packageNameForProto(file_path)
    if package_name is None:
      error_messages += error_message
  return error_messages
# Example target outputs are:
# - "26,27c26"
# - "12,13d13"
# - "7a8,9"
def executeCommand(command,
                   error_message,
                   file_path,
                   regex=re.compile(r"^(\d+)[acd]?\d*(?:,\d+[acd]?\d*)?$")):
  """Run a shell command and turn diff-style output into actionable messages.

  Args:
    command: shell command to execute (typically "<tool> file | diff file -").
    error_message: human-readable summary used when the command fails.
    file_path: the file being checked; used to build "file:line" messages.
    regex: matches diff range headers such as "26,27c26", "12,13d13", "7a8,9"
      and captures the first line number.
      BUG FIX: the character class was previously written [a|c|d], which also
      matched a literal '|' character; '|' has no alternation meaning inside
      a class, so the correct spelling is [acd].

  Returns:
    [] when the command succeeds silently; the command's output lines when it
    succeeds with output; otherwise a list whose first entry is the summary
    message followed by one "  file:line" entry per detected line number.
  """
  try:
    output = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT).strip()
    if output:
      return output.decode('utf-8').split("\n")
    return []
  except subprocess.CalledProcessError as e:
    # diff exits 1 when files differ; any other non-zero status is a tool error.
    if (e.returncode != 0 and e.returncode != 1):
      return ["ERROR: something went wrong while executing: %s" % e.cmd]
    # In case we can't find any line numbers, record an error message first.
    error_messages = ["%s for file: %s" % (error_message, file_path)]
    for line in e.output.decode('utf-8').splitlines():
      for num in regex.findall(line):
        error_messages.append("  %s:%s" % (file_path, num))
    return error_messages
def fixHeaderOrder(file_path):
  """Rewrite the file's #include blocks in place via header_order.py.

  Returns a one-element error list on failure, [] on success."""
  cmd = "%s --rewrite --include_dir_order %s --path %s" % (HEADER_ORDER_PATH, include_dir_order,
                                                           file_path)
  return [] if os.system(cmd) == 0 else ["header_order.py rewrite error: %s" % (file_path)]
def clangFormat(file_path):
  """Run clang-format -i on the file, rewriting it in place.

  Returns a one-element error list on failure, [] on success."""
  cmd = "%s -i %s" % (CLANG_FORMAT_PATH, file_path)
  return [] if os.system(cmd) == 0 else ["clang-format rewrite error: %s" % (file_path)]
def checkFormat(file_path):
  """Check (and, in 'fix' mode, first repair) the format of a single file.

  Returns [] for excluded/irrelevant files or clean files; otherwise a list of
  error messages headed by a "From <file>" line."""
  if file_path.startswith(EXCLUDED_PREFIXES) or not file_path.endswith(SUFFIXES):
    return []
  # Apply fixes first, if asked, and then run checks. If we wind up attempting
  # to fix an issue, but there's still an error, that's a problem.
  apply_fixes = operation_type == "fix"
  if isBuildFile(file_path) or isSkylarkFile(file_path) or isWorkspaceFile(file_path):
    fixer, checker = fixBuildPath, checkBuildPath
  else:
    fixer, checker = fixSourcePath, checkSourcePath
  errors = []
  if apply_fixes:
    errors.extend(fixer(file_path))
  errors.extend(checker(file_path))
  return (["From %s" % file_path] + errors) if errors else errors
def checkFormatReturnTraceOnError(file_path):
  """Run checkFormat and return the traceback of any exception.

  This is the worker-process entry point: exceptions cannot propagate usefully
  across the multiprocessing pool, so they are converted into message lines.
  """
  try:
    return checkFormat(file_path)
  except Exception:
    # BUG FIX: was a bare 'except:', which also swallows SystemExit and
    # KeyboardInterrupt and prevents clean pool shutdown on Ctrl-C.
    return traceback.format_exc().split("\n")
def checkOwners(dir_name, owned_directories, error_messages):
  """Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS
  Args:
    dir_name: the directory being checked.
    owned_directories: directories currently listed in CODEOWNERS.
    error_messages: where to put an error message for new unowned directories.
  """
  has_owner = any(
      owned.startswith(dir_name) or dir_name.startswith(owned) for owned in owned_directories)
  if not has_owner and dir_name not in UNOWNED_EXTENSIONS:
    error_messages.append("New directory %s appears to not have owners in CODEOWNERS" % dir_name)
def checkFormatVisitor(arg, dir_name, names):
  """Run checkFormat in parallel for the given files.

  Args:
    arg: a tuple (pool, result_list, owned_directories, error_messages)
      pool and result_list are for starting tasks asynchronously.
      owned_directories tracks directories listed in the CODEOWNERS file.
      error_messages is a list of string format errors.
    dir_name: the parent directory of the given files.
    names: a list of file names.
  """
  # Unpack the multiprocessing.Pool process pool and list of results. Since
  # python lists are passed as references, this is used to collect the list of
  # async results (futures) from running checkFormat and passing them back to
  # the caller.
  # BUG FIX: the last tuple element was previously unpacked into a misspelled
  # local ('error_messags'), so checkOwners() below silently appended to the
  # module-level 'error_messages' global instead of the caller's list.
  pool, result_list, owned_directories, error_messages = arg
  # Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded
  # manner as it is a small and limited list.
  source_prefix = './source/'
  full_prefix = './source/extensions/'
  # Check to see if this directory is a subdir under /source/extensions
  # Also ignore top level directories under /source/extensions since we don't
  # need owners for source/extensions/access_loggers etc, just the subdirectories.
  if dir_name.startswith(full_prefix) and '/' in dir_name[len(full_prefix):]:
    checkOwners(dir_name[len(source_prefix):], owned_directories, error_messages)
  for file_name in names:
    result = pool.apply_async(checkFormatReturnTraceOnError, args=(dir_name + "/" + file_name,))
    result_list.append(result)
# checkErrorMessages iterates over the list with error messages and prints
# errors and returns a bool based on whether there were any errors.
def checkErrorMessages(error_messages):
  """Print every message prefixed with 'ERROR:'; return True iff any existed."""
  for message in error_messages:
    print("ERROR: %s" % message)
  return bool(error_messages)
if __name__ == "__main__":
  parser = argparse.ArgumentParser(description="Check or fix file format.")
  parser.add_argument("operation_type",
                      type=str,
                      choices=["check", "fix"],
                      help="specify if the run should 'check' or 'fix' format.")
  parser.add_argument(
      "target_path",
      type=str,
      nargs="?",
      default=".",
      help="specify the root directory for the script to recurse over. Default '.'.")
  parser.add_argument("--add-excluded-prefixes",
                      type=str,
                      nargs="+",
                      help="exclude additional prefixes.")
  parser.add_argument("-j",
                      "--num-workers",
                      type=int,
                      default=multiprocessing.cpu_count(),
                      help="number of worker processes to use; defaults to one per core.")
  parser.add_argument("--api-prefix", type=str, default="./api/", help="path of the API tree.")
  parser.add_argument("--api-shadow-prefix",
                      type=str,
                      default="./generated_api_shadow/",
                      help="path of the shadow API tree.")
  parser.add_argument("--skip_envoy_build_rule_check",
                      action="store_true",
                      help="skip checking for '@envoy//' prefix in build rules.")
  parser.add_argument("--namespace_check",
                      type=str,
                      nargs="?",
                      default="Envoy",
                      help="specify namespace check string. Default 'Envoy'.")
  parser.add_argument("--namespace_check_excluded_paths",
                      type=str,
                      nargs="+",
                      default=[],
                      help="exclude paths from the namespace_check.")
  parser.add_argument("--build_fixer_check_excluded_paths",
                      type=str,
                      nargs="+",
                      default=[],
                      help="exclude paths from envoy_build_fixer check.")
  parser.add_argument("--include_dir_order",
                      type=str,
                      default=",".join(common.includeDirOrder()),
                      help="specify the header block include directory order.")
  args = parser.parse_args()

  # Publish parsed options as module-level globals consumed by the check/fix
  # helpers above.
  operation_type = args.operation_type
  target_path = args.target_path
  envoy_build_rule_check = not args.skip_envoy_build_rule_check
  namespace_check = args.namespace_check
  namespace_check_excluded_paths = args.namespace_check_excluded_paths + [
      "./tools/api_boost/testdata/",
      "./tools/clang_tools/",
  ]
  build_fixer_check_excluded_paths = args.build_fixer_check_excluded_paths + [
      "./bazel/external/",
      "./bazel/toolchains/",
      "./bazel/BUILD",
      "./tools/clang_tools",
  ]
  include_dir_order = args.include_dir_order
  if args.add_excluded_prefixes:
    EXCLUDED_PREFIXES += tuple(args.add_excluded_prefixes)

  # Check whether all needed external tools are available.
  ct_error_messages = checkTools()
  if checkErrorMessages(ct_error_messages):
    sys.exit(1)

  # Returns the list of directories with owners listed in CODEOWNERS. May append errors to
  # error_messages.
  def ownedDirectories(error_messages):
    owned = []
    maintainers = [
        '@mattklein123', '@htuch', '@alyssawilk', '@zuercher', '@lizan', '@snowp', '@junr03',
        '@dnoe', '@dio', '@jmarantz'
    ]
    try:
      with open('./CODEOWNERS') as f:
        for line in f:
          # If this line is of the form "extensions/... @owner1 @owner2" capture the directory
          # name and store it in the list of directories with documented owners.
          m = re.search(r'.*(extensions[^@]*\s+)(@.*)', line)
          if m is not None and not line.startswith('#'):
            owned.append(m.group(1).strip())
            # BUG FIX: the pattern must be a raw string; '@\S+' in a plain
            # string is an invalid escape sequence (DeprecationWarning, and a
            # SyntaxWarning on Python 3.12+).
            owners = re.findall(r'@\S+', m.group(2).strip())
            if len(owners) < 2:
              error_messages.append("Extensions require at least 2 owners in CODEOWNERS:\n"
                                    " {}".format(line))
            maintainer = len(set(owners).intersection(set(maintainers))) > 0
            if not maintainer:
              error_messages.append("Extensions require at least one maintainer OWNER:\n"
                                    " {}".format(line))
      return owned
    except IOError:
      return []  # for the check format tests.

  # Calculate the list of owned directories once per run.
  error_messages = []
  owned_directories = ownedDirectories(error_messages)
  if os.path.isfile(target_path):
    error_messages += checkFormat("./" + target_path)
  else:
    pool = multiprocessing.Pool(processes=args.num_workers)
    results = []
    # For each file in target_path, start a new task in the pool and collect the
    # results (results is passed by reference, and is used as an output).
    for root, _, files in os.walk(target_path):
      checkFormatVisitor((pool, results, owned_directories, error_messages), root, files)
    # Close the pool to new tasks, wait for all of the running tasks to finish,
    # then collect the error messages.
    pool.close()
    pool.join()
    error_messages += sum((r.get() for r in results), [])
  if checkErrorMessages(error_messages):
    print("ERROR: check format failed. run 'tools/check_format.py fix'")
    sys.exit(1)
  if operation_type == "check":
    print("PASS")
| tools/check_format.py | 39,608 | Run checkFormat and return the traceback of any exception.
Run checkFormat in parallel for the given files.
Args:
arg: a tuple (pool, result_list, owned_directories, error_messages)
pool and result_list are for starting tasks asynchronously.
owned_directories tracks directories listed in the CODEOWNERS file.
error_messages is a list of string format errors.
dir_name: the parent directory of the given files.
names: a list of file names.
Checks to make sure a given directory is present either in CODEOWNERS or OWNED_EXTENSIONS
Args:
dir_name: the directory being checked.
owned_directories: directories currently listed in CODEOWNERS.
error_messages: where to put an error message for new unowned directories.
!/usr/bin/env python3 Files in these paths can make reference to protobuf stuff directly Files matching these exact names can reference real-world time. These include the class definitions for real-world time, the construction of them in main(), and perf annotation. For now it includes the validation server but that really should be injected too. Files in these paths can use MessageLite::SerializeAsString Files in these paths can use Protobuf::util::JsonStringToMessage Histogram names which are allowed to be suffixed with the unit symbol, all of the pre-existing ones were grandfathered as part of PR 8484 for backwards compatibility. Files in these paths can use std::regex Only one C++ file should instantiate grpc_init yapf: disable Well-known types should be referenced from the ProtobufWkt namespace. Other common mis-namespacing of protobuf types. yapf: enable Map a line transformation function across each line of a file. .bak temporaries. We used to use fileinput in the older Python 2.7 script, but this doesn't do inplace mode and UTF-8 in Python 3, so doing it the manual way. Obtain all the lines in a given file. Read a UTF-8 encoded file as a str. lookPath searches for the given executable in all directories in PATH environment variable. If it cannot be found, empty string is returned. pathExists checks whether the given path exists. This function assumes that the path is absolute and evaluates environment variables. executableByOthers checks whether the given path has execute permission for others. Check whether all needed external tools (clang-format, buildifier, buildozer) are available. To avoid breaking the Lyft import, we just check for path inclusion here. Real-world time sources should not be instantiated in the source, except for a few specific cases. They should be passed down from where they are instantied to where they need to be used, e.g. through the ServerInstance, Dispatcher, or ClusterManager. 
If we were in the section for the current release this means we have passed it. If we see a version marker we are now in the section for the current release. Version file checking has enough special cased logic to merit its own checks. This only validates entries for the current release as very old release notes have a different format. Strip double space after '.' This may prove overenthusiastic and need to be restricted to comments and metadata files but works for now. Fix incorrect protobuf namespace references. Use recommended cpp stdlib We want to look for a call to condvar.waitFor, but there's no strong pattern to the variable name of the condvar. If we just look for ".waitFor" we'll also pick up time_system_.waitFor(...), and we don't want to return true for that pattern. But in that case there is a strong pattern of using time_system in various spellings as the variable name. Determines whether the filename is either in the specified subdirectory, or at the top level. We consider files in the top level for the benefit of the check_format testcases in tools/testdata/check_format. Skip this check for check_format's unit-tests. Check fixable errors. These may have been fixed already. Do not include the virtual_includes headers. Some errors cannot be fixed automatically, and actionable, consistent, navigable messages should be emitted to make it easy to find and fix the errors by hand. We don't check here for std::mutex because that may legitimately show up in comments, for example this one. We don't check here for std::shared_timed_mutex because that may legitimately show up in comments, for example this one. Check that we use the absl::Time library The std::atomic_* free functions are functionally equivalent to calling operations on std::atomic<T> objects, so prefer to use that instead. 
__attribute__((packed)) is not supported by MSVC, we have a PACKED_STRUCT macro that can be used instead Designated initializers are not part of the C++14 standard and are not supported by MSVC The ?: operator is non-standard, it is a GCC extension The MessageLite::SerializeAsString doesn't generate deterministic serialization, use MessageUtil::hash instead. Centralize all usage of JSON parsing so it is easier to make changes in JSON parsing behavior. TODO(htuch): Add API specific BUILD fixer script. Example target outputs are: - "26,27c26" - "12,13d13" - "7a8,9" In case we can't find any line numbers, record an error message first. Apply fixes first, if asked, and then run checks. If we wind up attempting to fix an issue, but there's still an error, that's a problem. Unpack the multiprocessing.Pool process pool and list of results. Since python lists are passed as references, this is used to collect the list of async results (futures) from running checkFormat and passing them back to the caller. Sanity check CODEOWNERS. This doesn't need to be done in a multi-threaded manner as it is a small and limited list. Check to see if this directory is a subdir under /source/extensions Also ignore top level directories under /source/extensions since we don't need owners for source/extensions/access_loggers etc, just the subdirectories. checkErrorMessages iterates over the list with error messages and prints errors and returns a bool based on whether there were any errors. Check whether all needed external tools are available. Returns the list of directories with owners listed in CODEOWNERS. May append errors to error_messages. If this line is of the form "extensions/... @owner1 @owner2" capture the directory name and store it in the list of directories with documented owners. for the check format tests. Calculate the list of owned directories once per run. 
For each file in target_path, start a new task in the pool and collect the results (results is passed by reference, and is used as an output). Close the pool to new tasks, wait for all of the running tasks to finish, then collect the error messages. | 6,648 | en | 0.88576 |
#!/usr/bin/env python
"""Configure one interface on each of four Arista switches via eAPI.

Connection parameters and interface variables for every switch are read from
``arista_connect1.yml`` (one top-level key per device, with a ``data`` sub-key
holding the Jinja2 interface variables).  For each switch the script prompts
for the eAPI password, pushes the rendered interface config and prints the
result of ``show ip interface brief``.

Fixes over the original: the shebang was missing its leading '/', yaml.load()
was called without a Loader (deprecated and unsafe), the template output was
"stripped" with strip('/n') -- which strips the characters '/' and 'n', not a
newline, and only worked by accident -- and the per-device logic was
copy-pasted four times.
"""
import pyeapi
import yaml
from getpass import getpass
from pprint import pprint
from jinja2 import Template

# Jinja2 template for a single L3 interface; note the leading newline, which
# is stripped before the commands are sent.
INTERFACE_TEMPLATE = '''
interface {{ intf_name }}
ip address {{ intf_ip }}/{{ intf_mask }}
'''


def configure_device(device_vars, j2_template):
    """Push the rendered interface config to one switch and show the result.

    ``device_vars`` holds the pyeapi connection parameters plus a ``data``
    key with the Jinja2 interface variables.  Prompts for the password.
    """
    intf_vars = device_vars.get("data", {})
    connect_params = {k: v for k, v in device_vars.items() if k != "data"}
    connection = pyeapi.client.connect(**connect_params, password=getpass())
    device = pyeapi.client.Node(connection)

    rendered = j2_template.render(**intf_vars)
    # Drop the template's leading blank line and send the two config commands.
    cfg = rendered.strip().split("\n")
    out = device.config(cfg)
    print(out)

    show_ip_int = device.enable("show ip interface brief")
    pprint(show_ip_int)


if __name__ == "__main__":
    # safe_load avoids arbitrary Python object construction from YAML.
    with open("arista_connect1.yml") as f:
        device_dict = yaml.safe_load(f)

    j2_template = Template(INTERFACE_TEMPLATE)
    # Same order as the original script: the YAML file's key order.
    for device_name in device_dict:
        configure_device(device_dict[device_name], j2_template)
| arista_exercise/ex4_example.py | 2,567 | !usr/bin/env python Loading the yaml file Keys in the dictionary stored in a list data and connect for 4 arista switches For arista switch 2 Arista switch 3 For arista switch 4 | 176 | en | 0.627227 |
#!C:\Users\Claudia\PycharmProjects\BlackJack\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
# Auto-generated setuptools console-script wrapper for the 'pip3.7' entry
# point; it is regenerated on (re)install, so do not edit by hand.
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalize argv[0] by dropping the Windows '-script.py(w)'/'.exe' suffix
    # so pip reports its own name correctly, then run the entry point and exit
    # with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
    )
| venv/Scripts/pip3.7-script.py | 421 | !C:\Users\Claudia\PycharmProjects\BlackJack\venv\Scripts\python.exe EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7' | 135 | en | 0.396494 |
# -*- coding: utf-8 -*-
"""
Money types for the shop application.

Re-exports :class:`MoneyMaker` and :class:`AbstractMoney` and builds the
default currency-less ``Money`` type used throughout the shop.

Source: https://github.com/awesto/django-shop/blob/12e246b356dbc1bc5bbdc8f056e3cb109c617997/shop/money/__init__.py
"""
from .money_maker import MoneyMaker, AbstractMoney
# The default Money type for this shop
Money = MoneyMaker()
| backend/edw_shop/money/__init__.py | 259 | Source: https://github.com/awesto/django-shop/blob/12e246b356dbc1bc5bbdc8f056e3cb109c617997/shop/money/__init__.py
-*- coding: utf-8 -*- The default Money type for this shop | 175 | en | 0.566926 |
# -*- coding: utf-8 -*-
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# imports ######################################################################
import rsa
# participants #################################################################
# One 1024-bit key pair per participant; pool workers speed up key generation.
(sender_pub_key, sender_priv_key) = rsa.newkeys(1024, poolsize=4)
(receiver_pub_key, receiver_priv_key) = rsa.newkeys(1024, poolsize=4)
# side of sender ###############################################################
message = 'Hello'
print('Message:', message)
payload = message.encode()
print('Bytes message:', payload)
# Sign with the sender's private key, encrypt for the receiver's public key.
sig = rsa.sign(payload, sender_priv_key, 'SHA-512')
print('Signature:', sig)
ciphertext = rsa.encrypt(payload, receiver_pub_key)
print('Encrypted message:', ciphertext)
# side of receiver
decrypted_bytes = rsa.decrypt(ciphertext, receiver_priv_key)
print('Decrypted bytes message:', decrypted_bytes)
# Verify the signature against the sender's public key before trusting it.
signature_ok = rsa.verify(decrypted_bytes, sig, sender_pub_key)
if signature_ok:
    print('Message signature verified.')
else:
    print('SIGNATURE VERIFICATION ERROR!')
plaintext = decrypted_bytes.decode()
print('Decrypted message:', plaintext)
# END ##########################################################################
| other/tests/rsa-example.py | 1,350 | -*- coding: utf-8 -*- This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. imports participants side of sender side of receiver END | 274 | en | 0.907824 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING
from msrest import Deserializer, Serializer
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from .. import models
from ._configuration import PolicyClientConfiguration
from .operations import PolicyAssignmentsOperations, PolicyDefinitionsOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class PolicyClient:
    """To manage and control access to your resources, you can define customized policies and assign
    them at a scope.

    NOTE: AutoRest-generated client -- manual edits are lost on regeneration.

    :ivar policy_assignments: PolicyAssignmentsOperations operations
    :vartype policy_assignments:
     azure.mgmt.resource.policy.v2016_04_01.aio.operations.PolicyAssignmentsOperations
    :ivar policy_definitions: PolicyDefinitionsOperations operations
    :vartype policy_definitions:
     azure.mgmt.resource.policy.v2016_04_01.aio.operations.PolicyDefinitionsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2016-04-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = PolicyClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # (De)serializers are built from every model class in the generated
        # models module; client-side validation is handled by the service.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False

        # Operation groups share the pipeline client and serializers.
        self.policy_assignments = PolicyAssignmentsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.policy_definitions = PolicyDefinitionsOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy so the caller's request object is not mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "PolicyClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2016_04_01/aio/_policy_client.py | 4,405 | To manage and control access to your resources, you can define customized policies and assign
them at a scope.
:ivar policy_assignments: PolicyAssignmentsOperations operations
:vartype policy_assignments:
azure.mgmt.resource.policy.v2016_04_01.aio.operations.PolicyAssignmentsOperations
:ivar policy_definitions: PolicyDefinitionsOperations operations
:vartype policy_definitions:
azure.mgmt.resource.policy.v2016_04_01.aio.operations.PolicyDefinitionsOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2016-04-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- pylint: disable=unused-import,ungrouped-imports | 2,190 | en | 0.641903 |
from typing import Tuple, Union
from discord import Embed, Member, PermissionOverwrite, TextChannel, VoiceChannel, VoiceState
from discord.ext.commands import bot_has_guild_permissions
from discord_slash import (
Button,
ComponentContext,
Modal,
ModalContext,
Select,
SelectOption,
SlashCommandOptionType,
SlashContext,
TextInput,
TextInputStyle,
)
from discord_slash.cog_ext import cog_subcommand as slash_subcommand
from discord_slash.utils.manage_commands import create_option
from utils import (
AsteroidBot,
Cog,
DiscordColors,
DontHavePrivateRoom,
GuildData,
GuildPrivateVoice,
bot_owner_or_permissions,
cog_is_enabled,
get_content,
is_enabled,
)
class PrivateRooms(Cog):
    def __init__(self, bot: AsteroidBot) -> None:
        # Cog metadata: emoji and name are used by the bot's help/extension UI.
        self.bot = bot
        self.emoji = "🔊"
        self.name = "PrivateRooms"
    async def __check(
        self, ctx: SlashContext, *, return_guild_data: bool = False
    ) -> Union[Tuple[VoiceChannel, dict], Tuple[VoiceChannel, dict, GuildData]]:
        """Resolve the invoking member's active private voice channel.

        Returns ``(voice_channel, content)`` -- the localized strings dict --
        or ``(voice_channel, content, guild_data)`` when ``return_guild_data``
        is True.  Raises ``DontHavePrivateRoom`` when the caller has no active
        private room.
        """
        guild_data = await self.bot.get_guild_data(ctx.guild_id)
        # active_channels maps str(member_id) -> voice channel id.
        active_channels = guild_data.private_voice.active_channels
        content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
        if str(ctx.author_id) not in active_channels:
            raise DontHavePrivateRoom
        voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
        if return_guild_data:
            return voice_channel, content, guild_data
        return voice_channel, content
    @slash_subcommand(
        base="private_rooms",
        subcommand_group="control",
        name="close",
        description="Closes your room",
    )
    @is_enabled()
    @bot_has_guild_permissions(manage_channels=True)
    async def private__rooms_control_close(self, ctx: SlashContext):
        """Deny @everyone the connect permission on the caller's private room."""
        voice_channel, content = await self.__check(ctx)
        await voice_channel.set_permissions(ctx.guild.default_role, connect=False)
        await ctx.send(content["ROOM_CLOSED"], hidden=True)
    @slash_subcommand(
        base="private_rooms",
        subcommand_group="control",
        name="open",
        description="Opens your room",
    )
    @is_enabled()
    @bot_has_guild_permissions(manage_channels=True)
    async def private__rooms_control_open(self, ctx: SlashContext):
        """Grant @everyone the connect permission on the caller's private room."""
        voice_channel, content = await self.__check(ctx)
        await voice_channel.set_permissions(ctx.guild.default_role, connect=True)
        await ctx.send(content["ROOM_OPENED"], hidden=True)
    @slash_subcommand(
        base="private_rooms",
        subcommand_group="control",
        name="hide",
        description="Hides your room",
    )
    @is_enabled()
    @bot_has_guild_permissions(manage_channels=True)
    async def private__rooms_control_hide(self, ctx: SlashContext):
        """Make the caller's private room invisible to @everyone."""
        voice_channel, content = await self.__check(ctx)
        await voice_channel.set_permissions(ctx.guild.default_role, view_channel=False)
        await ctx.send(content["ROOM_HIDED"], hidden=True)
    @slash_subcommand(
        base="private_rooms",
        subcommand_group="control",
        name="unhide",
        description="Unhides your room",
    )
    @is_enabled()
    @bot_has_guild_permissions(manage_channels=True)
    async def private__rooms_control_unhide(self, ctx: SlashContext):
        """Make the caller's private room visible to @everyone again."""
        voice_channel, content = await self.__check(ctx)
        await voice_channel.set_permissions(ctx.guild.default_role, view_channel=True)
        await ctx.send(content["ROOM_UNHIDED"], hidden=True)
    @slash_subcommand(
        base="private_rooms",
        subcommand_group="control",
        name="change_name",
        description="Change room name",
    )
    @is_enabled()
    @bot_has_guild_permissions(manage_channels=True)
    async def private__rooms_control_change__name(self, ctx: SlashContext, name: str):
        """Rename the caller's private room to ``name``."""
        voice_channel, content = await self.__check(ctx)
        await voice_channel.edit(name=name)
        await ctx.send(content["ROOM_NAME_WAS_SETUP"], hidden=True)
    @slash_subcommand(
        base="private_rooms",
        subcommand_group="control",
        name="ban",
        description="Bans member to room",
    )
    @is_enabled()
    @bot_has_guild_permissions(move_members=True, manage_channels=True)
    async def private__rooms_control_ban(self, ctx: SlashContext, member: Member):
        """Deny ``member`` the connect permission; disconnect them if inside."""
        voice_channel, content = await self.__check(ctx)
        await voice_channel.set_permissions(member, connect=False)
        # Kick the member out only if they are currently in this room.
        if member.voice and member.voice.channel.id == voice_channel.id:
            await member.move_to(None)
        await ctx.send(content["MEMBER_WAS_BANNED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="unban",
description="Unbans member from room",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_unban(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
await voice_channel.set_permissions(member, connect=True)
await ctx.send(content["MEMBER_WAS_UNBANNED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="kick",
description="Kicks a member from room",
)
@is_enabled()
@bot_has_guild_permissions(move_members=True, manage_channels=True)
async def private__rooms_control_kick(self, ctx: SlashContext, member: Member):
voice_channel, content = await self.__check(ctx)
if member.voice and member.voice.channel.id == voice_channel.id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_KICKED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="transfer_ownership",
description="Transfer room ownership",
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_transfer__ownership(self, ctx: SlashContext, member: Member):
voice_channel, content, guild_data = await self.__check(ctx, return_guild_data=True)
await guild_data.private_voice.set_private_voice_channel(member.id, voice_channel.id)
await voice_channel.set_permissions(
member, manage_channels=True, connect=True, move_members=True
)
await voice_channel.set_permissions(
ctx.author, manage_channels=False, connect=False, move_members=False
)
await ctx.send(content["OWNERSHIP_TRANSFERED"], hidden=True)
@slash_subcommand(
base="private_rooms",
subcommand_group="control",
name="set_limit",
description="Sets room limit",
options=[
create_option(
name="limit",
description="The limit of members in your room",
option_type=SlashCommandOptionType.INTEGER,
min_value=1,
max_value=99,
)
],
)
@is_enabled()
@bot_has_guild_permissions(manage_channels=True)
async def private__rooms_control_set__limit(self, ctx: SlashContext, limit: int):
voice_channel, content = await self.__check(ctx)
await voice_channel.edit(user_limit=limit)
await ctx.send(content["LIMIT_WAS_SETUP"], hidden=True)
    @slash_subcommand(
        base="private_rooms",
        name="create_menu",
        description="Creates a control menu",
    )
    @is_enabled()
    @bot_has_guild_permissions(move_members=True, manage_channels=True)
    @bot_owner_or_permissions(manage_guild=True)
    async def private__rooms_create__menu(self, ctx: SlashContext):
        """Bootstrap the private-rooms feature for this guild.

        Creates a category containing a 'create room' voice channel and a
        write-restricted text channel with the button control menu, then
        stores both channel ids in the guild data.
        """
        await ctx.defer(hidden=True)
        guild_data = await self.bot.get_guild_data(ctx.guild_id)
        content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
        # Two rows of control buttons; the custom_ids are dispatched in
        # on_button_click.
        components = [
            [
                Button(emoji=self.bot.get_emoji(959124362840113182), custom_id="voice_close"),
                Button(emoji=self.bot.get_emoji(959124362890461254), custom_id="voice_open"),
                Button(emoji=self.bot.get_emoji(959124362890461325), custom_id="voice_hide"),
                Button(emoji=self.bot.get_emoji(959124362890473582), custom_id="voice_unhide"),
                Button(
                    emoji=self.bot.get_emoji(959124362798174319), custom_id="voice_change_room_name"
                ),
            ],
            [
                Button(emoji=self.bot.get_emoji(959124362882068550), custom_id="voice_ban"),
                Button(emoji=self.bot.get_emoji(959124362835931236), custom_id="voice_unban"),
                Button(emoji=self.bot.get_emoji(959124362974343169), custom_id="voice_kick"),
                Button(emoji=self.bot.get_emoji(959124362823340052), custom_id="voice_transfer"),
                Button(
                    emoji=self.bot.get_emoji(959124362835927080), custom_id="voice_set_room_limit"
                ),
            ],
        ]
        category = await ctx.guild.create_category(content["PRIVATE_ROOMS"])
        voice_channel = await category.create_voice_channel(content["CREATE_ROOM"])
        # Members can read the menu but cannot chat or run slash commands there.
        overwrites = {
            ctx.guild.default_role: PermissionOverwrite(
                send_messages=False, use_slash_commands=False
            )
        }
        text_channel: TextChannel = await category.create_text_channel(
            content["ROOM_CONTROL"], overwrites=overwrites
        )
        await guild_data.create_private_voice(text_channel.id, voice_channel.id)
        embed = Embed(
            title=content["ROOM_CONTROL_TITLE"],
            description="".join(content["ROOM_CONTROL_DESCRIPTION"]),
            color=DiscordColors.EMBED_COLOR,
        )
        await text_channel.send(embed=embed, components=components)
        await ctx.send(content["SUCCESSFULLY_CREATED"])
    @Cog.listener()
    @cog_is_enabled()
    async def on_voice_state_update(self, member: Member, before: VoiceState, after: VoiceState):
        """Create a personal room when a member joins the 'create room' channel.

        Also runs the owner-left bookkeeping (`_check_channel`) whenever the
        member moved out of some channel.
        """
        guild_data = await self.bot.get_guild_data(member.guild.id)
        private_voice = guild_data.private_voice
        # Channel that triggers room creation (set up by create_menu).
        voice_channel_id = private_voice.voice_channel_id
        if after.channel and after.channel.id == voice_channel_id:
            if before.channel:
                await self._check_channel(member, before, private_voice)
            # Creating a private voice channel
            overwrites = {
                member.guild.default_role: PermissionOverwrite(connect=False),
                member: PermissionOverwrite(manage_channels=True, connect=True, move_members=True),
            }
            channel: VoiceChannel = await after.channel.category.create_voice_channel(
                f"{member.display_name}'s channel", overwrites=overwrites
            )
            await member.move_to(channel)
            await private_voice.set_private_voice_channel(member.id, channel.id)
            return
        if before.channel:
            await self._check_channel(member, before, private_voice)
    async def _check_channel(
        self, member: Member, before: VoiceState, private_voice: GuildPrivateVoice
    ):
        """Handle ``member`` leaving the private room they own.

        Deletes the room when it is now empty; otherwise transfers ownership
        (record plus channel permissions) to the first remaining member.
        """
        # Only relevant if this member owns an active room.
        if not (channel_id := private_voice.active_channels.get(str(member.id))):
            return
        # Only relevant if the channel they left is that room.
        if before.channel.id != channel_id:
            return
        if not before.channel.members:
            await before.channel.delete()
            await private_voice.delete_private_voice_channel(member.id)
            return
        # Room still occupied: promote the first remaining member to owner.
        first_member = before.channel.members[0]
        await private_voice.set_private_voice_channel(first_member.id, before.channel.id)
        await before.channel.set_permissions(
            member, manage_channels=False, connect=False, move_members=False
        )
        await before.channel.set_permissions(
            first_member, manage_channels=True, connect=True, move_members=True
        )
@Cog.listener()
@cog_is_enabled()
async def on_button_click(self, ctx: ComponentContext):
if not ctx.custom_id.startswith("voice"):
return
guild_data = await self.bot.get_guild_data(ctx.guild_id)
active_channels = guild_data.private_voice.active_channels
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if str(ctx.author_id) not in active_channels:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
match ctx.custom_id:
case "voice_close":
await voice_channel.set_permissions(ctx.guild.default_role, connect=False)
await ctx.send(content["ROOM_CLOSED"], hidden=True)
case "voice_open":
await voice_channel.set_permissions(ctx.guild.default_role, connect=True)
await ctx.send(content["ROOM_OPENED"], hidden=True)
case "voice_hide":
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=False)
await ctx.send(content["ROOM_HIDED"], hidden=True)
case "voice_unhide":
await voice_channel.set_permissions(ctx.guild.default_role, view_channel=True)
await ctx.send(content["ROOM_UNHIDED"], hidden=True)
case "voice_change_room_name":
modal = Modal(
custom_id="voice_modal_change_room_name",
title=content["PRIVATE_ROOM_CONTROL_MODAL"],
components=[
TextInput(
custom_id="channel_name",
label=content["ROOM_NAME"],
style=TextInputStyle.SHORT,
)
],
)
await ctx.popup(modal)
case "voice_ban" | "voice_unban" | "voice_kick" | "voice_transfer":
modal = Modal(
custom_id=f"voice_modal_{ctx.custom_id.replace('voice', '')}",
title=content["PRIVATE_ROOM_CONTROL_MODAL"],
components=[
TextInput(
custom_id="user_id",
label=content["MEMBER_ID"],
style=TextInputStyle.SHORT,
)
],
)
await ctx.popup(modal)
case "voice_set_room_limit":
select = Select(
custom_id="voice_select_set_room_limit",
options=[
SelectOption(label=content["REMOVE_LIMIT"], value=0),
SelectOption(label="2", value=2),
SelectOption(label="3", value=3),
SelectOption(label="4", value=4),
SelectOption(label="5", value=5),
SelectOption(label="10", value=10),
],
)
await ctx.send(content["SETUP_ROOM_LIMIT"], components=[select], hidden=True)
    @Cog.listener()
    @cog_is_enabled()
    async def on_select_option(self, ctx: ComponentContext):
        """Apply the user-limit choice from the room-control select menu."""
        if not ctx.custom_id.startswith("voice"):
            return
        guild_data = await self.bot.get_guild_data(ctx.guild_id)
        active_channels = guild_data.private_voice.active_channels
        content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
        if str(ctx.author_id) not in active_channels:
            return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
        voice_channel: VoiceChannel = ctx.guild.get_channel(active_channels[str(ctx.author_id)])
        # NOTE(review): component values arrive as strings; this relies on the
        # library/API coercing `user_limit` — confirm, or cast with int().
        await voice_channel.edit(user_limit=ctx.values[0])
        await ctx.send(content["LIMIT_WAS_SETUP"], hidden=True)
@Cog.listener(name="on_modal")
@cog_is_enabled()
async def on_voice_modal(self, ctx: ModalContext):
if not ctx.custom_id.startswith("voice"):
return
await ctx.defer(hidden=True)
guild_data = await self.bot.get_guild_data(ctx.guild_id)
voice_channel_id = guild_data.private_voice.active_channels.get(str(ctx.author_id))
content = get_content("PRIVATE_VOICE", guild_data.configuration.language)
if voice_channel_id is None:
return await ctx.send(content["DONT_HAVE_PRIVATE_ROOM"], hidden=True)
voice_channel: VoiceChannel = ctx.guild.get_channel(voice_channel_id)
if channel_name := ctx.values.get("channel_name"):
await voice_channel.edit(name=channel_name)
return await ctx.send(content["ROOM_NAME_WAS_SETUP"], hidden=True)
user_id: str = ctx.values["user_id"]
if not user_id.isdigit():
return await ctx.send(content["NOT_ID"], hidden=True)
member: Member = ctx.guild.get_member(int(user_id))
if member is None:
return await ctx.send(content["NOT_MEMBER_ID"], hidden=True)
match ctx.custom_id:
case "voice_modal_ban":
await voice_channel.set_permissions(member, connect=False)
if member.voice and member.voice.channel.id == voice_channel_id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_BANNED"], hidden=True)
case "voice_modal_unban":
await voice_channel.set_permissions(member, connect=True)
await ctx.send(content["MEMBER_WAS_UNBANNED"], hidden=True)
case "voice_modal_kick":
if member.voice and member.voice.channel.id == voice_channel_id:
await member.move_to(None)
await ctx.send(content["MEMBER_WAS_KICKED"], hidden=True)
case "voice_modal_transfer":
await guild_data.private_voice.set_private_voice_channel(user_id, voice_channel_id)
await voice_channel.set_permissions(
member, manage_channels=True, connect=True, move_members=True
)
await voice_channel.set_permissions(
ctx.author, manage_channels=False, connect=False, move_members=False
)
await ctx.send(content["OWNERSHIP_TRANSFERED"], hidden=True)
def setup(bot):
    # Extension entry point: registers the PrivateRooms cog with the bot.
    bot.add_cog(PrivateRooms(bot))
| source/cogs/private_rooms.py | 18,513 | Creating a private voice channel | 32 | en | 0.889131 |
from flask import request
from flask_restplus import Resource
from app.project.auth import auth
from app.project.auth.auth_service import AuthService
from app.project.user.user_dto import UserDto
from app.project.user.user_service import UserService
# Flask-RESTPlus namespace and serialization model shared by the routes below.
api = UserDto.api
_user = UserDto.user
@api.route('/')
class UserList(Resource):
    # Collection resource: GET lists all users, POST (admin only) creates one.
    @api.doc('list_of_registered_users')
    @api.marshal_list_with(_user, envelope='data')
    def get(self):
        """List all registered users"""
        return UserService.get_all_users()
    # Auth decorators are outermost so they run before the handler/marshalling.
    @auth.login_required
    @AuthService.admin_permission_required
    @api.response(201, 'User successfully created.')
    @api.doc('create a new user(only for admin)')
    @api.expect(_user, validate=True)
    def post(self):
        """Creates a new User(only for admin) """
        user_service = UserService()
        return user_service.create_user(request.json)
@api.route('/<public_id>')
@api.param('public_id', 'The User identifier')
@api.response(404, 'User not found.')
class User(Resource):
    # Single-user resource, addressed by its public identifier.
    @api.doc('get a user')
    @api.marshal_with(_user)
    def get(self, public_id):
        """get a user given its identifier"""
        service = UserService()
        service.load_user(public_id)
        # abort() raises, so the happy path can simply fall through.
        if service.is_nan_user():
            api.abort(404)
        return service.get_user_public()
| app/project/user/user_controller.py | 1,377 | List all registered users
get a user given its identifier
Creates a new User(only for admin) | 92 | en | 0.716425 |
from django.core.management.base import BaseCommand
# Third-Party
import requests
class Command(BaseCommand):
    """Download a file shared via a Dropbox link into the working directory."""
    help = "Command to upload from dropbox."
    def add_arguments(self, parser):
        # Single optional positional argument: the Dropbox share URL.
        parser.add_argument(
            'dropbox',
            nargs='?',
        )
    def handle(self, *args, **options):
        share_url = options['dropbox']
        # The filename is the last path segment before the query string.
        path_part = share_url.partition('?')[0]
        filename = path_part.rpartition('/')[2]
        # Switch the share link into direct-download mode.
        download_url = share_url.replace("?dl=0", "?dl=1")
        with open(filename, "wb") as file:
            response = requests.get(download_url)
            file.write(response.content)
        self.stdout.write("Uploaded {0}".format(filename))
        return
| project/apps/core/management/commands/upload_file.py | 812 | Third-Party Parse URL input open in binary mode get request write to file | 73 | en | 0.801998 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Unitary gate."""
from warnings import warn
from typing import List, Optional, Union, Tuple
import numpy as np
from scipy.linalg import schur
from qiskit.circuit.parameter import ParameterExpression
from qiskit.circuit.exceptions import CircuitError
from .instruction import Instruction
class Gate(Instruction):
    """Unitary gate.

    Base class for instructions representing unitary operations; adds matrix
    access, powers, and controlled versions on top of :class:`Instruction`.
    """
    def __init__(self, name: str, num_qubits: int, params: List,
                 label: Optional[str] = None) -> None:
        """Create a new gate.
        Args:
            name: The Qobj name of the gate.
            num_qubits: The number of qubits the gate acts on.
            params: A list of parameters.
            label: An optional label for the gate.
        """
        self._label = label
        self.definition = None
        # Gates act on qubits only, hence 0 classical bits.
        super().__init__(name, num_qubits, 0, params)
    # Set higher priority than Numpy array and matrix classes
    __array_priority__ = 20
    def to_matrix(self) -> np.ndarray:
        """Return a Numpy.array for the gate unitary matrix.
        Returns:
            np.ndarray: if the Gate subclass has a matrix definition.
        Raises:
            CircuitError: If a Gate subclass does not implement this method an
                exception will be raised when this base class method is called.
        """
        if hasattr(self, '__array__'):
            # pylint: disable=no-member
            return self.__array__(dtype=complex)
        raise CircuitError("to_matrix not defined for this {}".format(type(self)))
    def power(self, exponent: float):
        """Creates a unitary gate as `gate^exponent`.
        Args:
            exponent (float): Gate^exponent
        Returns:
            qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent.
        Raises:
            CircuitError: If Gate is not unitary
        """
        from qiskit.quantum_info.operators import Operator  # pylint: disable=cyclic-import
        from qiskit.extensions.unitary import UnitaryGate  # pylint: disable=cyclic-import
        # Should be diagonalized because it's a unitary.
        decomposition, unitary = schur(Operator(self).data, output='complex')
        # Raise the diagonal entries to the specified power
        decomposition_power = list()
        decomposition_diagonal = decomposition.diagonal()
        # assert off-diagonal are 0
        if not np.allclose(np.diag(decomposition_diagonal), decomposition):
            raise CircuitError('The matrix is not diagonal')
        for element in decomposition_diagonal:
            decomposition_power.append(pow(element, exponent))
        # Then reconstruct the resulting gate.
        unitary_power = unitary @ np.diag(decomposition_power) @ unitary.conj().T
        return UnitaryGate(unitary_power, label='%s^%s' % (self.name, exponent))
    def _return_repeat(self, exponent: float) -> 'Gate':
        # Helper: a fresh Gate named '<name>*<exponent>' with the same size/params.
        return Gate(name="%s*%s" % (self.name, exponent), num_qubits=self.num_qubits,
                    params=self.params)
    def assemble(self) -> 'Instruction':
        """Assemble a QasmQobjInstruction"""
        instruction = super().assemble()
        if self.label:
            instruction.label = self.label
        return instruction
    @property
    def label(self) -> str:
        """Return gate label"""
        return self._label
    @label.setter
    def label(self, name: str):
        """Set gate label to name
        Args:
            name (str or None): label to assign unitary
        Raises:
            TypeError: name is not string or None.
        """
        if isinstance(name, (str, type(None))):
            self._label = name
        else:
            raise TypeError('label expects a string or None')
    def control(self, num_ctrl_qubits: Optional[int] = 1, label: Optional[str] = None,
                ctrl_state: Optional[Union[int, str]] = None):
        """Return controlled version of gate. See :class:`.ControlledGate` for usage.
        Args:
            num_ctrl_qubits: number of controls to add to gate (default=1)
            label: optional gate label
            ctrl_state: The control state in decimal or as a bitstring
                (e.g. '111'). If None, use 2**num_ctrl_qubits-1.
        Returns:
            qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm
                uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size
                num_qubits + 2*num_ctrl_qubits - 1.
        Raises:
            QiskitError: unrecognized mode or invalid ctrl_state
        """
        # pylint: disable=cyclic-import
        from .add_control import add_control
        return add_control(self, num_ctrl_qubits, label, ctrl_state)
    @staticmethod
    def _broadcast_single_argument(qarg: List) -> List:
        """Expands a single argument.
        For example: [q[0], q[1]] -> [q[0]], [q[1]]
        """
        # [q[0], q[1]] -> [q[0]]
        #              -> [q[1]]
        for arg0 in qarg:
            yield [arg0], []
    @staticmethod
    def _broadcast_2_arguments(qarg0: List, qarg1: List) -> List:
        if len(qarg0) == len(qarg1):
            # [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]]
            #                              -> [q[1], r[1]]
            for arg0, arg1 in zip(qarg0, qarg1):
                yield [arg0, arg1], []
        elif len(qarg0) == 1:
            # [[q[0]], [r[0], r[1]]] -> [q[0], r[0]]
            #                        -> [q[0], r[1]]
            for arg1 in qarg1:
                yield [qarg0[0], arg1], []
        elif len(qarg1) == 1:
            # [[q[0], q[1]], [r[0]]] -> [q[0], r[0]]
            #                        -> [q[1], r[0]]
            for arg0 in qarg0:
                yield [arg0, qarg1[0]], []
        else:
            raise CircuitError('Not sure how to combine these two-qubit arguments:\n %s\n %s' %
                               (qarg0, qarg1))
    @staticmethod
    def _broadcast_3_or_more_args(qargs: List) -> List:
        if all(len(qarg) == len(qargs[0]) for qarg in qargs):
            for arg in zip(*qargs):
                yield list(arg), []
        else:
            raise CircuitError(
                'Not sure how to combine these qubit arguments:\n %s\n' % qargs)
    def broadcast_arguments(self, qargs: List, cargs: List) -> Tuple[List, List]:
        """Validation and handling of the arguments and its relationship.
        For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This
        method yields the arguments in the right grouping. In the given example::
            in: [[q[0],q[1]], q[2]],[]
            outs: [q[0], q[2]], []
                  [q[1], q[2]], []
        The general broadcasting rules are:
            * If len(qargs) == 1::
                [q[0], q[1]] -> [q[0]],[q[1]]
            * If len(qargs) == 2::
                [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]
                [[q[0]], [r[0], r[1]]]       -> [q[0], r[0]], [q[0], r[1]]
                [[q[0], q[1]], [r[0]]]       -> [q[0], r[0]], [q[1], r[0]]
            * If len(qargs) >= 3::
                [q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]
        Args:
            qargs: List of quantum bit arguments.
            cargs: List of classical bit arguments.
        Returns:
            A tuple with single arguments.
        Raises:
            CircuitError: If the input is not valid. For example, the number of
                arguments does not match the gate expectation.
        """
        if len(qargs) != self.num_qubits or cargs:
            raise CircuitError(
                f'The amount of qubit({len(qargs)})/clbit({len(cargs)}) arguments does'
                f' not match the gate expectation ({self.num_qubits}).')
        if any(not qarg for qarg in qargs):
            raise CircuitError('One or more of the arguments are empty')
        if len(qargs) == 1:
            return Gate._broadcast_single_argument(qargs[0])
        elif len(qargs) == 2:
            return Gate._broadcast_2_arguments(qargs[0], qargs[1])
        elif len(qargs) >= 3:
            return Gate._broadcast_3_or_more_args(qargs)
        else:
            raise CircuitError('This gate cannot handle %i arguments' % len(qargs))
    def validate_parameter(self, parameter):
        """Gate parameters should be int, float, or ParameterExpression"""
        if isinstance(parameter, ParameterExpression):
            if len(parameter.parameters) > 0:
                return parameter  # expression has free parameters, we cannot validate it
            if not parameter._symbol_expr.is_real:
                raise CircuitError("Bound parameter expression is complex in gate {}".format(
                    self.name))
            return parameter  # per default assume parameters must be real when bound
        if isinstance(parameter, (int, float)):
            return parameter
        elif isinstance(parameter, (np.integer, np.floating)):
            return parameter.item()
        elif isinstance(parameter, np.ndarray):
            warn("Gate param type %s is being deprecated as of 0.16.0, and will be removed "
                 "no earlier than 3 months after that release date. "
                 "Considering creating your own Gate subclass with the method validate_parameter "
                 " to allow this param type." % type(parameter), DeprecationWarning, 3)
            return parameter
        else:
            raise CircuitError("Invalid param type {0} for gate {1}.".format(type(parameter),
                                                                             self.name))
Create a new gate.
Args:
name: The Qobj name of the gate.
num_qubits: The number of qubits the gate acts on.
params: A list of parameters.
label: An optional label for the gate.
Expands a single argument.
For example: [q[0], q[1]] -> [q[0]], [q[1]]
Assemble a QasmQobjInstruction
Validation and handling of the arguments and its relationship.
For example, ``cx([q[0],q[1]], q[2])`` means ``cx(q[0], q[2]); cx(q[1], q[2])``. This
method yields the arguments in the right grouping. In the given example::
in: [[q[0],q[1]], q[2]],[]
outs: [q[0], q[2]], []
[q[1], q[2]], []
The general broadcasting rules are:
* If len(qargs) == 1::
[q[0], q[1]] -> [q[0]],[q[1]]
* If len(qargs) == 2::
[[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]], [q[1], r[1]]
[[q[0]], [r[0], r[1]]] -> [q[0], r[0]], [q[0], r[1]]
[[q[0], q[1]], [r[0]]] -> [q[0], r[0]], [q[1], r[0]]
* If len(qargs) >= 3::
[q[0], q[1]], [r[0], r[1]], ...] -> [q[0], r[0], ...], [q[1], r[1], ...]
Args:
qargs: List of quantum bit arguments.
cargs: List of classical bit arguments.
Returns:
A tuple with single arguments.
Raises:
CircuitError: If the input is not valid. For example, the number of
arguments does not match the gate expectation.
Return controlled version of gate. See :class:`.ControlledGate` for usage.
Args:
num_ctrl_qubits: number of controls to add to gate (default=1)
label: optional gate label
ctrl_state: The control state in decimal or as a bitstring
(e.g. '111'). If None, use 2**num_ctrl_qubits-1.
Returns:
qiskit.circuit.ControlledGate: Controlled version of gate. This default algorithm
uses num_ctrl_qubits-1 ancillae qubits so returns a gate of size
num_qubits + 2*num_ctrl_qubits - 1.
Raises:
QiskitError: unrecognized mode or invalid ctrl_state
Return gate label
Set gate label to name
Args:
name (str or None): label to assign unitary
Raises:
TypeError: name is not string or None.
Creates a unitary gate as `gate^exponent`.
Args:
exponent (float): Gate^exponent
Returns:
qiskit.extensions.UnitaryGate: To which `to_matrix` is self.to_matrix^exponent.
Raises:
CircuitError: If Gate is not unitary
Return a Numpy.array for the gate unitary matrix.
Returns:
np.ndarray: if the Gate subclass has a matrix definition.
Raises:
CircuitError: If a Gate subclass does not implement this method an
exception will be raised when this base class method is called.
Gate parameters should be int, float, or ParameterExpression
Unitary gate.
This code is part of Qiskit. (C) Copyright IBM 2017. This code is licensed under the Apache License, Version 2.0. You may obtain a copy of this license in the LICENSE.txt file in the root directory of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. Any modifications or derivative works of this code must retain this copyright notice, and modified files need to carry a notice indicating that they have been altered from the originals. Set higher priority than Numpy array and matrix classes pylint: disable=no-member pylint: disable=cyclic-import pylint: disable=cyclic-import Should be diagonalized because it's a unitary. Raise the diagonal entries to the specified power assert off-diagonal are 0 Then reconstruct the resulting gate. pylint: disable=cyclic-import [q[0], q[1]] -> [q[0]] -> [q[1]] [[q[0], q[1]], [r[0], r[1]]] -> [q[0], r[0]] -> [q[1], r[1]] [[q[0]], [r[0], r[1]]] -> [q[0], r[0]] -> [q[0], r[1]] [[q[0], q[1]], [r[0]]] -> [q[0], r[0]] -> [q[1], r[0]] expression has free parameters, we cannot validate it per default assume parameters must be real when bound | 3,827 | en | 0.634997 |
import numpy as np
def make_exp_kernel(L0):
    """Build an exponential (Laplacian-style) kernel with length scale L0.

    The returned callable maps x1 of shape (n1, d) and x2 of shape (n2, d)
    to an (n1, n2) matrix K with K[i, j] = exp(-mean_k |x1[i,k]-x2[j,k]| / L0).
    """
    def exp_kernel(x1, x2):
        lifted = np.expand_dims(x1, 2)                       # (n1, d, 1)
        mean_abs = np.mean(np.abs(lifted - x2.T), axis=1)    # (n1, n2)
        return np.exp(-mean_abs / L0)
    return exp_kernel
def make_sq_exp_kernel(L0):
    """Build a squared-exponential (RBF) kernel with length scale L0.

    The returned callable maps x1 of shape (n1, d) and x2 of shape (n2, d)
    to an (n1, n2) matrix with entries exp(-||x1[i]-x2[j]||^2 / (2*L0^2)).
    """
    def sq_exp_kernel(x1, x2):
        lifted = np.expand_dims(x1, 2)                       # (n1, d, 1)
        sq_dist = np.sum((lifted - x2.T) ** 2, axis=1)       # (n1, n2)
        return np.exp(-sq_dist / (2 * L0**2))
    return sq_exp_kernel
def weighted_neighbors_loss(train_data, valid_data, kernel):
    """Computes the negative log prob per data point.

    Labels for each validation point are predicted as a kernel-weighted
    average of the (one-hot) training labels, normalized to a distribution;
    the result is the mean negative log-probability of the true labels.
    """
    X_train, T_train = train_data
    X_valid, T_valid = valid_data
    weights = kernel(X_valid, X_train)
    label_probs = weights @ T_train
    label_probs = label_probs / label_probs.sum(axis=1, keepdims=True)
    true_log_probs = np.log(np.sum(label_probs * T_valid, axis=1))
    return -np.mean(true_log_probs, axis=0)
| cpu_ver/hypergrad/kernel_methods.py | 991 | Computes the negative log prob per data point.
Append a singleton dimension Append a singleton dimension | 106 | en | 0.431311 |
import os
import sys
import torch
import numpy as np
import datetime
import logging
import provider
import importlib
import shutil
import argparse
from pathlib import Path
from tqdm import tqdm
from data_utils.PCDLoader import *
BASE_DIR = os.path.dirname(os.path.abspath(__file__))  # directory containing this script
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))  # make ./models importable by module name
def parse_args():
    """Parse the training script's command-line arguments.

    Returns:
        argparse.Namespace holding all training hyper-parameters.
    """
    parser = argparse.ArgumentParser('training')
    parser.add_argument('--use_cpu', action='store_true', default=False, help='use cpu mode')
    parser.add_argument('--gpu', type=str, default='0', help='specify gpu device')
    parser.add_argument('--batch_size', type=int, default=8, help='batch size in training')
    parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]')
    parser.add_argument('--num_category', default=12, type=int, help='training on real dataset')
    parser.add_argument('--epoch', default=20, type=int, help='number of epoch in training')
    parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training')
    parser.add_argument('--num_point', type=int, default=1024, help='Point Number')
    parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training')
    parser.add_argument('--log_dir', type=str, default=None, help='experiment root')
    parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate')
    parser.add_argument('--use_normals', action='store_true', default=False, help='use normals')
    parser.add_argument('--process_data', action='store_true', default=False, help='save data offline')
    parser.add_argument('--use_uniform_sample', action='store_true', default=False, help='use uniform sampiling')
    parser.add_argument('--num_sparse_point', type=int, default=50, help='Point Number for domain loss')
    # BUG FIX: `type=bool` makes argparse treat ANY non-empty string (even
    # "False") as True; a store_true flag gives the intended on/off semantics.
    parser.add_argument('--random_choose_sparse', action='store_true', default=False, help='Random select num_sparse_point from [10,20,30,40,50]')
    parser.add_argument('--SO3_Rotation', action='store_true', default=False, help='arbitrary rotation in SO3')
    parser.add_argument('--DA_method', type=str, default="multi_coral_mmd", help='choose the DA loss function')
    parser.add_argument('--alpha', type=float, default=10, help='set the value of classification loss')
    parser.add_argument('--lamda', type=float, default=10, help='set the value of CORAL loss')
    parser.add_argument('--beta', type=float, default=10, help='set the value of MMD loss')
    return parser.parse_args()
# Use the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def inplace_relu(m):
    """Module-apply hook: switch any *ReLU-named module to in-place mode."""
    if 'ReLU' in type(m).__name__:
        m.inplace = True
def test(model, loader, num_class=12):
    """Evaluate a classifier over `loader`.

    Args:
        model: network returning (log-probs, trans_feat) for a points batch.
        loader: iterable of dicts with 'pointcloud' and 'category' batches;
            pointcloud is transposed to channel-first before the forward pass.
        num_class: number of classes for the per-class accuracy table.

    Returns:
        (instance_acc, class_acc): overall accuracy and class-averaged accuracy.
    """
    mean_correct = []
    # Per class: [summed per-batch accuracy, batches seen, mean accuracy].
    class_acc = np.zeros((num_class, 3))
    classifier = model.eval()
    for j, data in tqdm(enumerate(loader), total=len(loader)):
        # BUG FIX: the batch was only unpacked inside the GPU branch, so
        # running with --use_cpu raised NameError on `points`.
        points, target = data['pointcloud'].float(), data['category']
        if not args.use_cpu:
            points, target = points.to(device), target.to(device)
        points = points.transpose(2, 1)
        pred, _ = classifier(points)
        pred_choice = pred.data.max(1)[1]
        for cat in np.unique(target.cpu()):
            classacc = pred_choice[target == cat].eq(target[target == cat].long().data).cpu().sum()
            class_acc[cat, 0] += classacc.item() / float(points[target == cat].size()[0])
            class_acc[cat, 1] += 1
        correct = pred_choice.eq(target.long().data).cpu().sum()
        mean_correct.append(correct.item() / float(points.size()[0]))
    class_acc[:, 2] = class_acc[:, 0] / class_acc[:, 1]
    class_acc = np.mean(class_acc[:, 2])
    instance_acc = np.mean(mean_correct)
    return instance_acc, class_acc
def main(args):
def log_string(str):
logger.info(str)
print(str)
'''HYPER PARAMETER'''
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
'''CREATE DIR'''
timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
exp_dir = Path('./log/')
exp_dir.mkdir(exist_ok=True)
exp_dir = exp_dir.joinpath('classification')
exp_dir.mkdir(exist_ok=True)
if args.log_dir is None:
exp_dir = exp_dir.joinpath(timestr)
else:
exp_dir = exp_dir.joinpath(args.log_dir)
exp_dir.mkdir(exist_ok=True)
checkpoints_dir = exp_dir.joinpath('checkpoints/')
checkpoints_dir.mkdir(exist_ok=True)
log_dir = exp_dir.joinpath('logs/')
log_dir.mkdir(exist_ok=True)
'''LOG'''
args = parse_args()
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
log_string('PARAMETER ...')
log_string(args)
'''DATA LOADING'''
log_string('Load dataset ...')
visual_data_path = 'data/visual_data_pcd/'
tactile_data_path = 'data/tactile_pcd_10_sampled_21.02/'
train_dataset = PCDPointCloudData(visual_data_path,
folder='Train',
sample_method='Voxel',
num_point=args.num_point,
sample=True,
rotation=False,
est_normal=args.use_normals)
test_dataset = PCDPointCloudData(visual_data_path,
folder='Test',
sample_method='Voxel',
num_point=args.num_point,
sample=True,
rotation=False,
est_normal=args.use_normals)
if args.random_choose_sparse is True:
raise NotImplementedError("Function Not Implemented") # Not Implement
# domain_adaptation_dataset = PCDPointCloudData(tactile_data_path, folder='Train',
# random_num=True,
# list_num_point=[10,20,30,40,50])
else:
domain_adaptation_dataset = PCDPointCloudData(tactile_data_path,
folder='Train',
sample_method='Voxel',
num_point=args.num_sparse_point,
sample=True,
rotation=False,
est_normal=args.use_normals)
trainDataLoader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)
domainAdaptationDataLoader = torch.utils.data.DataLoader(domain_adaptation_dataset, batch_size=args.batch_size, shuffle=True, num_workers=10, drop_last=True)
testDataLoader = torch.utils.data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=10)
'''Output middle layers'''
activation = {}
def get_activation(name):
    """Build a forward hook that stashes a layer's detached output.

    The hook writes into the enclosing `activation` dict under `name`;
    used below to pull fc1/fc2 features for the multi-layer DA loss.
    """
    def hook(model, input, output):
        activation[name] = output.detach()
    return hook
'''MODEL LOADING'''
num_class = args.num_category
model = importlib.import_module(args.model)
shutil.copy('./models/%s.py' % args.model, str(exp_dir))
shutil.copy('models/pointnet_cls.py', str(exp_dir))
shutil.copy('data_utils/PCDLoader.py', str(exp_dir))
shutil.copy('./train_realMulti-DA-Loss_classification.py', str(exp_dir))
classifier = model.get_model(num_class, normal_channel=args.use_normals)
criterion = model.get_loss()
if args.DA_method == "coral":
criterion_DA = model.get_coral_loss(DA_alpha=args.alpha, DA_lamda=args.lamda)
elif args.DA_method == "mmd":
criterion_DA = model.get_mmd_loss(DA_alpha=args.alpha, DA_lamda=args.lamda)
elif args.DA_method == "coral_mmd":
criterion_DA = model.get_coral_mmd_loss(DA_alpha=args.alpha, DA_beta=args.beta,
DA_lamda=args.lamda)
elif args.DA_method == "multi_coral_mmd":
criterion_DA = model.get_multiLayer_loss(DA_alpha=args.alpha, DA_beta=args.beta,
DA_lamda=args.lamda)
else:
raise NameError("Wrong input for DA method name!")
classifier.apply(inplace_relu)
if not args.use_cpu:
classifier = classifier.cuda()
criterion = criterion.cuda()
criterion_DA = criterion_DA.cuda()
# Load pretrained model with real dataset
try:
checkpoint = torch.load(str(exp_dir) + '/checkpoints/best_model.pth')
start_epoch = checkpoint['epoch']
classifier.load_state_dict(checkpoint['model_state_dict'])
log_string('Use pretrain model')
except:
log_string('No existing model, starting training from scratch...')
start_epoch = 0
try:
min_loss = checkpoint['loss']
log_string('Loading model with DA loss %f' % min_loss)
except:
log_string('No DA loss found in the model')
min_loss = 10000.0
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(
classifier.parameters(),
lr=args.learning_rate,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=args.decay_rate
)
else:
optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
global_epoch = 0
global_step = 0
best_instance_acc = 0.0
best_class_acc = 0.0
running_loss = 0.0
'''TRANING'''
logger.info('Start training...')
end_epoch = start_epoch + args.epoch
print("start epoch: ", start_epoch)
print("end epoch: ", end_epoch)
for epoch in range(start_epoch, end_epoch):
log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, end_epoch))
mean_correct = []
# Freeze Conv
for name, param in classifier.named_parameters():
if "feat" in name:
param.requires_grad = False
scheduler.step()
for batch_id, (data, data_DA) in tqdm(
enumerate(zip(trainDataLoader,domainAdaptationDataLoader), 0),
total=len(trainDataLoader),
smoothing=0.9):
optimizer.zero_grad()
points, target = data['pointcloud'].to(device).float(), data['category'].to(device)
points_DA = data_DA['pointcloud'].to(device).float()
points = points.data.cpu().numpy()
points = provider.random_point_dropout(points)
points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
points = torch.Tensor(points)
points = points.transpose(2, 1)
points_DA = points_DA.data.cpu().numpy()
points_DA = provider.random_point_dropout(points_DA)
points_DA[:, :, 0:3] = provider.random_scale_point_cloud(points_DA[:, :, 0:3])
points_DA[:, :, 0:3] = provider.shift_point_cloud(points_DA[:, :, 0:3])
points_DA = torch.Tensor(points_DA)
points_DA = points_DA.transpose(2, 1)
if not args.use_cpu:
points, target = points.cuda(), target.cuda()
points_DA = points_DA.cuda()
pred, trans_feat = classifier(points)
# Multi-layer Loss
###############################################################################################
# FC1
classifier.fc1.register_forward_hook(get_activation('fc1'))
output_dense_1 = classifier(points)
feature_dense_1 = activation['fc1']
# print(feature_dense_1.size())
classifier.fc1.register_forward_hook(get_activation('fc1'))
output_DA_1 = classifier(points_DA)
feature_DA_1 = activation['fc1']
# print(feature_DA_1.size())
# FC2
classifier.fc2.register_forward_hook(get_activation('fc2'))
output_dense_2 = classifier(points)
feature_dense_2 = activation['fc2']
# print(feature_dense_2.size())
classifier.fc2.register_forward_hook(get_activation('fc2'))
output_DA_2 = classifier(points_DA)
feature_DA_2 = activation['fc2']
# print(feature_DA_2.size())
# change the loss here for testing!!!
DA_loss, loss = criterion_DA(pred, target.long(), trans_feat,
feature_dense_1, feature_DA_1, feature_dense_2, feature_DA_2)
################################################################################################
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
loss.backward()
optimizer.step()
global_step += 1
# Print the loss
running_loss += DA_loss.item()
if batch_id % 100 == 99:
# log_string("fc1 {}".format(classifier.fc1.weight.grad))
# log_string("fc2 {}".format(classifier.fc2.weight.grad))
# log_string("fc3 {}".format(classifier.fc3.weight.grad))
# print("Training loss {} ".format(loss.item()/100))
calculate_loss = running_loss/100
log_string("Running DA loss {} ".format(calculate_loss))
if calculate_loss < min_loss:
min_loss = calculate_loss
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/best_model.pth'
log_string('Saving at %s' % savepath)
state = {
'epoch': epoch,
# 'instance_acc': instance_acc,
# 'class_acc': class_acc,
'loss': calculate_loss,
'model_state_dict': classifier.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
running_loss = 0.0
train_instance_acc = np.mean(mean_correct)
log_string('Train Instance Accuracy: %f' % train_instance_acc)
with torch.no_grad():
instance_acc, class_acc = test(classifier.eval(), testDataLoader, num_class=num_class)
if (instance_acc >= best_instance_acc):
best_instance_acc = instance_acc
best_epoch = epoch + 1
if (class_acc >= best_class_acc):
best_class_acc = class_acc
log_string('Test Instance Accuracy: %f, Class Accuracy: %f' % (instance_acc, class_acc))
log_string('Best Instance Accuracy: %f, Class Accuracy: %f' % (best_instance_acc, best_class_acc))
if (instance_acc >= best_instance_acc):
# logger.info('Save model...')
# # print("This is a better model, but the model will not be saved")
logger.info('Model will not be saved with vision validation')
# savepath = str(checkpoints_dir) + '/best_model.pth'
# log_string('Saving at %s' % savepath)
# state = {
# 'epoch': best_epoch,
# 'instance_acc': instance_acc,
# 'class_acc': class_acc,
# 'model_state_dict': classifier.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# }
# torch.save(state, savepath)
global_epoch += 1
logger.info('End of training...')
# Script entry point: parse CLI arguments and launch training.
if __name__ == '__main__':
    # torch.cuda.empty_cache()
    args = parse_args()
    main(args)
| train_realMulti-DA-Loss_classification.py | 16,334 | PARAMETERS
Not Implement domain_adaptation_dataset = PCDPointCloudData(tactile_data_path, folder='Train', random_num=True, list_num_point=[10,20,30,40,50]) Load pretrained model with real dataset Freeze Conv Multi-layer Loss FC1 print(feature_dense_1.size()) print(feature_DA_1.size()) FC2 print(feature_dense_2.size()) print(feature_DA_2.size()) change the loss here for testing!!! Print the loss log_string("fc1 {}".format(classifier.fc1.weight.grad)) log_string("fc2 {}".format(classifier.fc2.weight.grad)) log_string("fc3 {}".format(classifier.fc3.weight.grad)) print("Training loss {} ".format(loss.item()/100)) 'instance_acc': instance_acc, 'class_acc': class_acc, logger.info('Save model...') print("This is a better model, but the model will not be saved") savepath = str(checkpoints_dir) + '/best_model.pth' log_string('Saving at %s' % savepath) state = { 'epoch': best_epoch, 'instance_acc': instance_acc, 'class_acc': class_acc, 'model_state_dict': classifier.state_dict(), 'optimizer_state_dict': optimizer.state_dict(), } torch.save(state, savepath) torch.cuda.empty_cache() | 1,202 | en | 0.316942 |
# Copyright 2020 Determined AI. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import pathlib
from typing import List, Optional
import yogadl
class LMDBDataRef(yogadl.DataRef):
    """DataRef backed by an on-disk LMDB cache of serialized samples."""

    def __init__(self, cache_filepath: pathlib.Path):
        # Open the LMDB cache and snapshot its key set; the keys drive both
        # __len__() and sharding.
        self._lmdb_access = yogadl.LmdbAccess(lmdb_path=cache_filepath)
        self._keys = self._lmdb_access.get_keys()

    def stream(
        self,
        start_offset: int = 0,
        shuffle: bool = False,
        skip_shuffle_at_epoch_end: bool = False,
        shuffle_seed: Optional[int] = None,
        shard_rank: int = 0,
        num_shards: int = 1,
        drop_shard_remainder: bool = False,
    ) -> yogadl.Stream:
        """
        Create a stream from a cache.
        """
        reshuffle_every_epoch = shuffle and not skip_shuffle_at_epoch_end
        if reshuffle_every_epoch:
            # Re-shuffling each epoch must be reproducible across shards.
            assert shuffle_seed is not None, (
                "Please set `shuffle_seed` if enabling `shuffle` and not enabling "
                "`skip_shuffle_at_epoch_end`."
            )

        shard_keys = self._shard_keys(
            shard_rank=shard_rank,
            num_shards=num_shards,
            drop_shard_remainder=drop_shard_remainder,
        )

        key_generator = yogadl.GeneratorFromKeys(
            keys=shard_keys,
            initial_offset=start_offset,
            read_val_from_key_fn=self._lmdb_access.read_value_by_key,
            shuffle_at_start=shuffle,
            shuffle_after_epoch=reshuffle_every_epoch,
            shuffle_seed=shuffle_seed,
        )

        return yogadl.Stream(
            iterator_fn=key_generator.instantiate_generator,
            length=len(shard_keys),
            output_types=self._lmdb_access.get_types(),
            output_shapes=self._lmdb_access.get_shapes(),
        )

    def __len__(self) -> int:
        # Total sample count in the cache (before any sharding).
        return len(self._keys)

    def _shard_keys(
        self, shard_rank: int, num_shards: int, drop_shard_remainder: bool
    ) -> List[bytes]:
        # Delegate to yogadl's helper; sequential=False gives strided sharding.
        return yogadl.shard_keys(
            keys=self._keys,
            shard_index=shard_rank,
            num_shards=num_shards,
            sequential=False,
            drop_shard_remainder=drop_shard_remainder,
        )
| yogadl/dataref/_local_lmdb_dataref.py | 2,832 | Create a stream from a cache.
Copyright 2020 Determined AI. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== | 683 | en | 0.820085 |
# coding: utf-8
import pprint
import re # noqa: F401
import six
class UserNotificationSubject(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'title': 'str',
        'url': 'str',
        'latest_comment_url': 'str',
        'type': 'str'
    }

    attribute_map = {
        'title': 'title',
        'url': 'url',
        'latest_comment_url': 'latest_comment_url',
        'type': 'type'
    }

    def __init__(self, title=None, url=None, latest_comment_url=None, type=None):  # noqa: E501
        """UserNotificationSubject - a model defined in Swagger"""  # noqa: E501
        self._title = None
        self._url = None
        self._latest_comment_url = None
        self._type = None
        self.discriminator = None
        # Assign through the property setters so any validation they add applies.
        if title is not None:
            self.title = title
        if url is not None:
            self.url = url
        if latest_comment_url is not None:
            self.latest_comment_url = latest_comment_url
        if type is not None:
            self.type = type

    @property
    def title(self):
        """Gets the title of this UserNotificationSubject.  # noqa: E501

        :return: The title of this UserNotificationSubject.  # noqa: E501
        :rtype: str
        """
        return self._title

    @title.setter
    def title(self, title):
        """Sets the title of this UserNotificationSubject.

        :param title: The title of this UserNotificationSubject.  # noqa: E501
        :type: str
        """
        self._title = title

    @property
    def url(self):
        """Gets the url of this UserNotificationSubject.  # noqa: E501

        :return: The url of this UserNotificationSubject.  # noqa: E501
        :rtype: str
        """
        return self._url

    @url.setter
    def url(self, url):
        """Sets the url of this UserNotificationSubject.

        :param url: The url of this UserNotificationSubject.  # noqa: E501
        :type: str
        """
        self._url = url

    @property
    def latest_comment_url(self):
        """Gets the latest_comment_url of this UserNotificationSubject.  # noqa: E501

        :return: The latest_comment_url of this UserNotificationSubject.  # noqa: E501
        :rtype: str
        """
        return self._latest_comment_url

    @latest_comment_url.setter
    def latest_comment_url(self, latest_comment_url):
        """Sets the latest_comment_url of this UserNotificationSubject.

        :param latest_comment_url: The latest_comment_url of this UserNotificationSubject.  # noqa: E501
        :type: str
        """
        self._latest_comment_url = latest_comment_url

    @property
    def type(self):
        """Gets the type of this UserNotificationSubject.  # noqa: E501

        :return: The type of this UserNotificationSubject.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this UserNotificationSubject.

        :param type: The type of this UserNotificationSubject.  # noqa: E501
        :type: str
        """
        self._type = type

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Fix: iterate with dict.items() directly. The six.iteritems() shim is
        # unnecessary (items() behaves the same way in this loop on Python 2
        # and 3) and its removal drops the runtime dependency on six here.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(UserNotificationSubject, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UserNotificationSubject):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| gitee/models/user_notification_subject.py | 4,996 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
UserNotificationSubject - a model defined in Swagger
Returns true if both objects are not equal
For `print` and `pprint`
Gets the latest_comment_url of this UserNotificationSubject. # noqa: E501
:return: The latest_comment_url of this UserNotificationSubject. # noqa: E501
:rtype: str
Sets the latest_comment_url of this UserNotificationSubject.
:param latest_comment_url: The latest_comment_url of this UserNotificationSubject. # noqa: E501
:type: str
Gets the title of this UserNotificationSubject. # noqa: E501
:return: The title of this UserNotificationSubject. # noqa: E501
:rtype: str
Sets the title of this UserNotificationSubject.
:param title: The title of this UserNotificationSubject. # noqa: E501
:type: str
Returns the model properties as a dict
Returns the string representation of the model
Gets the type of this UserNotificationSubject. # noqa: E501
:return: The type of this UserNotificationSubject. # noqa: E501
:rtype: str
Sets the type of this UserNotificationSubject.
:param type: The type of this UserNotificationSubject. # noqa: E501
:type: str
Gets the url of this UserNotificationSubject. # noqa: E501
:return: The url of this UserNotificationSubject. # noqa: E501
:rtype: str
Sets the url of this UserNotificationSubject.
:param url: The url of this UserNotificationSubject. # noqa: E501
:type: str
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 | 1,547 | en | 0.682969 |
import datetime as dt
import matplotlib.pyplot as plt
import lifetimes
import numpy as np
import os
import pandas as pd
import seaborn as sns
def numcard(x):
    """Return (cardinality, length): distinct-value count and total size of *x*."""
    distinct = x.nunique()
    total = len(x)
    return distinct, total
def todateclean(x):
    """Coerce *x* to datetimes (invalid entries become NaT), truncated to midnight.

    Fix: the original cast used the unit-less 'datetime64' dtype string, which
    pandas >= 2.0 rejects with a TypeError; 'datetime64[ns]' is the supported,
    equivalent spelling.
    """
    return pd.to_datetime(x, errors='coerce').dt.date.astype('datetime64[ns]')
"""
- info, shape, dtypes
- df.isnull().sum() #Check for null counts/ value_counts()
- Check for supposed imputed values (are there suspicious values of 0, like for Age. )
- change zeros to nans where appropriate
- Imputation of missing values
- handle stringified json
- df.dtypes # in case obj to (df.colname = df.colname.astype("category"))
- df['colname'] = pd.to_datetime(df['colname']).dt.date
- df.drop("colname", axis=1) # drop columns
- How balanced are the outcomes?
X = df.drop("diagnosis", axis=1) # just saying which axis again
Y = df["diagnosis"] # this is just a series now
col = X.columns # if we do type(col), it's an Index
X.isnull().sum() # this covers every column in the df.
def rangenorm(x):
return (x - x.mean())/(x.max() - x.min())
le = LabelEncoder()
le.fit(Y_norm)
"""
# --- Load raw transactions and derive the per-customer RFM-T summary ---
df = pd.read_csv("./ignoreland/onlineretail.csv")
df.info()
df.apply(lambda x: numcard(x))
datecols = ['InvoiceDate']
df.loc[:, datecols] = df.loc[:,datecols].apply(lambda x: todateclean(x))
# Keep only rows with positive quantity and a known customer id.
dfnew = df[(df.Quantity>0) & (df.CustomerID.isnull()==False)]
dfnew['amt'] = dfnew['Quantity'] * dfnew['UnitPrice']
dfnew.describe()
from lifetimes.plotting import *
from lifetimes.utils import *
observation_period_end = '2011-12-09'
monetary_value_col = 'amt'
# Aggregate transactions into lifetimes' (frequency, recency, T, monetary_value) format.
modeldata = summary_data_from_transaction_data(dfnew,
                                               'CustomerID',
                                               'InvoiceDate',
                                               monetary_value_col=monetary_value_col,
                                               observation_period_end=observation_period_end)
modeldata.head()
modeldata.info()  # 4 floats.
# Eyeball distribution of frequency (calculated)
modeldata['frequency'].plot(kind='hist', bins=50)
print(modeldata['frequency'].describe())
print(modeldata['recency'].describe())
# Share of one-time buyers (frequency == 0).
print(sum(modeldata['frequency'] == 0)/float(len(modeldata)))
##### Lec21
# --- Fit the BG/NBD purchase-frequency model ---
from lifetimes import BetaGeoFitter

# similar to lifelines
bgf = BetaGeoFitter(penalizer_coef=0.0)  # no regularization param.
bgf.fit(modeldata['frequency'], modeldata['recency'], modeldata['T'])
print(bgf)
# See https://www.youtube.com/watch?v=guj2gVEEx4s and
# https://www.youtube.com/watch?v=gx6oHqpRgpY
## residual lifetime value is more useful construct

from lifetimes.plotting import plot_frequency_recency_matrix
plot_frequency_recency_matrix(bgf)
from lifetimes.plotting import plot_probability_alive_matrix
plot_probability_alive_matrix(bgf)

# lec 24:
# set an outer time boundary and predict cumulative purchases by that time
t = 10  # from now until now+t periods
modeldata['predicted_purchases'] = \
    bgf.conditional_expected_number_of_purchases_up_to_time(t,
                                                            modeldata['frequency'],
                                                            modeldata['recency'],
                                                            modeldata['T'])
# Customers with the highest / lowest expected purchase counts.
modeldata.sort_values(by='predicted_purchases').tail(5)
modeldata.sort_values(by='predicted_purchases').head(5)
# lec 25: validation of model
from lifetimes.plotting import plot_period_transactions
plot_period_transactions(bgf)  # this plot shows very clearly the model performance
# in terms of transaction volume fit

# Lec 26: splitting into train and test (by time period)
# NOTE(review): holdout split is built from `df` (all rows), not the filtered
# `dfnew` used for fitting above -- confirm this is intended.
summary_cal_holdout = calibration_and_holdout_data(df,
                                                   'CustomerID',
                                                   'InvoiceDate',
                                                   calibration_period_end='2011-06-08',
                                                   observation_period_end='2011-12-09')
summary_cal_holdout.head()
bgf.fit(summary_cal_holdout['frequency_cal'],
        summary_cal_holdout['recency_cal'],
        summary_cal_holdout['T_cal'])
from lifetimes.plotting import plot_calibration_purchases_vs_holdout_purchases
plot_calibration_purchases_vs_holdout_purchases(bgf, summary_cal_holdout)

from lifetimes.plotting import plot_history_alive
days_since_birth = 365
fig = plt.figure(figsize=(12,8))
id = 14621  # choose a customer id (NOTE(review): shadows the builtin `id`)
sp_trans = df.loc[df['CustomerID'] == id]  # specific customer's covariates
plot_history_alive(bgf, days_since_birth, sp_trans, 'InvoiceDate')
# Lec28: Subsetting to customers who repurchase.
returning_customers_summary = modeldata[modeldata['frequency']>0]
returning_customers_summary.head()
returning_customers_summary.shape

# Lec 29: gamma-gamma model for LTV
# Note: good practice to confirm small/no apparent corr for frequency and mean trxn value
# Rev per trxn: predict total monetary value.
# The Beta param for the gamma model of total spend is itself assumed gamma distributed
# that is where the name comes from.
# the expectation of total spend for person i is calculated in empirical-bayes fashion, as a weighted
# mean of population average and the sample mean for person i.
# eq 5 in http://www.brucehardie.com/notes/025/gamma_gamma.pdf shows the arithmetic
# https://antonsruberts.github.io/lifetimes-CLV/ also great additional code.
# derivation here: http://www.brucehardie.com/notes/025/gamma_gamma.pdf
# Output of ggf fitter:
# p = the 'alpha' param in the gamma dist: E(Z|p, v) = p/v. Alpha adds upon convolution.
# q = the alpha param in the gamma dist of v -- v is gamma(q, gam) in the pop
# v = the 'beta' param in gamma dist. constant upon convolution.
# -- Note that v varies among customers (ie, is gamma distributed)
from lifetimes import GammaGammaFitter
ggf = GammaGammaFitter(penalizer_coef=0.0)
ggf.fit(returning_customers_summary['frequency'],
        returning_customers_summary['monetary_value'])
ggf.summary
ggf.conditional_expected_average_profit(modeldata['frequency'],
                                        modeldata['monetary_value'])
# cond_exp_avg_profit => gives prediction of mean trxn value.
a0 = returning_customers_summary['monetary_value'].shape[0]  # 2790 customers
# Total spend:
a1 = returning_customers_summary['monetary_value'].sum()
# Total time units (here, days) with purchase:
a2 = returning_customers_summary['frequency'].sum()
# Mean monetary value (over all purchase days), roughly equal to estimated v
returning_customers_summary['monetary_value'].mean()
ggf.summary
p_here = ggf.summary.iloc[0,0]
q_here = ggf.summary.iloc[1,0]
v_here = ggf.summary.iloc[2,0]  # model says 486; empirical average is 477.
money_per_customer = a1/a0

###############
# review, per documentation:
bgf.summary
# r, alpha = shape, scale for gamma dist that represents sum (convolution) of purchase rates
# a = alpha param for beta dist of churn
# b = beta param for beta dist of churn
x = np.random.gamma(.784, 49.28,10000)  # r, alpha, n
# Mean of the churn Beta distribution: a / (a + b).
bgf.summary.loc["a",:][0]/ (bgf.summary.loc["b",:][0] + bgf.summary.loc["a",:][0])
###################################
# lec31: other models
# --- Build per-customer features (recency/tenure/frequency) for supervised models ---
dfnew.dtypes
dfnew_train = dfnew[dfnew.InvoiceDate < '2011-11-09']
dfnew_test = dfnew[dfnew.InvoiceDate >= '2011-11-09']
dfnew_test.shape
dfnew_train.shape
maxdate = dfnew_train.InvoiceDate.max()
mindate = dfnew_train.InvoiceDate.min()
# Days between each invoice and the end of the training window.
dfnew_train['duration'] = (maxdate - dfnew_train.InvoiceDate)/np.timedelta64(1,'D')
dfsum1 = dfnew_train.groupby(['CustomerID'])['duration'].min().reset_index()
dfsum1.rename(columns = {'duration':'lasttime'}, inplace=True)  # time from lasttime to now
dfsum2 = dfnew_train.groupby(['CustomerID'])['duration'].max().reset_index()
dfsum2.rename(columns = {'duration':'firsttime'}, inplace=True)  # time from firsttime to now
dfnew_train['freq'] = 1
dfsum3 = dfnew_train.groupby(['CustomerID'])['freq'].sum().reset_index()  # count of transactions by customer
dfnew_train['freq3m'] = 1
# Transaction count restricted to the last ~3 months (duration < 91 days).
dfsum4 = dfnew_train[dfnew_train['duration'] < 91].groupby(['CustomerID'])['freq3m'].sum().reset_index()
# now let's merge the 3 customer-level datasets together.
# pd.concat uses indexes as the join keys,
from functools import reduce
dfs = [dfsum1, dfsum2, dfsum3, dfsum4]
dfsum = reduce(lambda left, right: pd.merge(left, right, on=['CustomerID'], how='outer'), dfs)
dfsum.shape
[_ for _ in map(lambda x: x.shape, dfs)]
dfsum.head()

###################
# Join external covariates and the holdout-period target (purchase count).
other_data = pd.read_csv("./ignoreland/oth.csv")
other_data.head()
dfsum = pd.merge(dfsum, other_data, on=['CustomerID'], how='left')
dfnew_test['target'] = 1
dfsum_target = dfnew_test.groupby(['CustomerID'])['target'].sum().reset_index()
dfsum = pd.merge(dfsum, dfsum_target, on=['CustomerID'], how='left')
dfsum = dfsum.fillna(0).sort_values(['target'], ascending=False)
list(dfsum.columns)
"""
reduce(Create tree, use tree to predict residuals, add.)
lightgbm is a faster implementation
"""
# lec36:
# Use xgboost to model the count of transactions per customer
import xgboost
from sklearn.model_selection import train_test_split
xgb_model = xgboost.XGBRegressor(n_estimators=2000, objective='reg:squarederror', max_depth=5)
predictors = ['lasttime', 'firsttime', 'freq', 'freq3m', 'score', 'discount']
X = dfsum[predictors]
y = dfsum['target']
# Split x, x, y, y | train, test; give test frac and random state
x_train, x_valid, y_train, y_valid = train_test_split(X, y, test_size=0.32, random_state=867)
xgb_model.fit(x_train, y_train)
pred = xgb_model.predict(x_valid) # vector of predicted
err = (pred - y_valid)**2 # squared errors
mse = err.sum()/len(err)
rmse = np.sqrt(mse)
from xgboost import plot_importance
x = list(zip(predictors, xgb_model.feature_importances_))
x.sort(key=lambda x: -x[1])
x
plot_importance(xgb_model)
# https://towardsdatascience.com/interpretable-machine-learning-with-xgboost-9ec80d148d27
### Some global measures of xgboost feature importance:
# weight: number of times feature is used to split data (over all trees)
# cover: weight, weighted by data points being touched by those splits
# gain: mean training loss reduction (reduction in test-train) when the feature is used.
# argsort here returns the indices of the (reverse-sorted) feature importance values.
# Useful for grabbing index values and then working with arbitrarily zipped other lists (as I did above)
sorted_idx = np.argsort(xgb_model.feature_importances_)[::-1]
for _ in sorted_idx:
print([x_train.columns[_], xgb_model.feature_importances_[_]])
[_ for _ in map(lambda x: xgb_model.get_booster().get_score(importance_type=x),
['gain','weight','cover','total_gain','total_cover'])]
def importances(model, lst):
    """Tabulate booster feature-importance scores, one column per importance type.

    Returns a DataFrame whose first column holds the booster's feature names
    and whose remaining columns (ordered as in *lst*) hold the scores.
    """
    scores = {
        kind: model.get_booster().get_score(importance_type=kind).values()
        for kind in lst
    }
    names = pd.Series(model.get_booster().feature_names)
    return pd.concat([names, pd.DataFrame(scores, columns=lst)], axis=1)
aa = importances(xgb_model,['gain','weight','cover','total_gain','total_cover'])
aa
pd.concat([pd.Series(xgb_model.get_booster().feature_names), aa], axis=1)

##################
# using lightgbm:
import lightgbm as lgb
lgbparams = {
    'boosting_type': 'gbdt',
    'objective': 'regression',
    'metric': 'mse',
    'max_depth': 6,
    'learning_rate': 0.02,
}
# Nested split: (train+valid) vs test, then train vs valid.
X1, X2, y1, y2 = train_test_split(X, y, test_size=0.32, random_state=867)
x_train, x_valid, y_train, y_valid = train_test_split(X1, y1, test_size=0.1, random_state=867)
x_train = x_train[predictors]
x_valid = x_valid[predictors]
d_train = lgb.Dataset(x_train, label=y_train)
d_valid = lgb.Dataset(x_valid, label=y_valid)
watchlist = [d_valid]
n_estimators = 2000
lightmodel = lgb.train(lgbparams, d_train, n_estimators, watchlist, verbose_eval=1)
importancelist = ['gain','split']
lightmodel.feature_importance(importance_type=importancelist[0])
importancdf = pd.DataFrame(pd.Series(predictors), columns=['feature'])
# NOTE(review): `importancdf` (typo for `importancedf`?) is never used below, and
# `importancedf` lacks the feature-name column -- likely intended to be concat'd
# with it. Confirm before relying on this table.
importancedf = reduce(lambda left, right: pd.concat([left, right], axis=1),
                      [pd.Series(lightmodel.feature_importance(_)) for _ in importancelist])
importancedf.corr()

"""
frequency = number of periods in which a non-first purchase was made
T = age in same units of each customer
recency = period[last purchase] - period[first purchase]
monetary_value = sum(money)/(frequency+1)
# use utility functions to aggregate into useable format.
# https://lifetimes.readthedocs.io/en/latest/More%20examples%20and%20recipes.html
# sql examples for aggregating into RFM and doing holdout split.
"""
"""
Also, per brucehardie,
The integrated (function of 2 functions) nature of these problems yields to
The gaussian hypergeometric function trick for evaluating the double integral.
"""
| clvscript00.py | 12,716 | 4 floats. Eyeball distribution of frequency (calculated) Lec21 similar to lifelines no regularization param. See https://www.youtube.com/watch?v=guj2gVEEx4s and https://www.youtube.com/watch?v=gx6oHqpRgpY residual lifetime value is more useful construct lec 24: set an outer time boundary and predict cumulative purchases by that time from now until now+t periods lec 25: validation of model this plot shows very clearly the model performance in terms of transaction volume fit Lec 26: splitting into train and test (by time period) choose a customer id specific customer's covariates Lec28: Subsetting to customers who repurchase. Lec 29: gamma-gamma model for LTV Note: good practice to confirm small/no apparent corr for frequency and mean trxn value Rev per trxn: predict total monetary value. The Beta param for the gamma model of total spend is itself assumed gamma distributed that is where the name comes from. teh expectation of total spend for person i is calculated in empirical-bayes fashion, as a weighted mean of population average and the sample mean for person i. eq 5 in http://www.brucehardie.com/notes/025/gamma_gamma.pdf shows the arithmetic https://antonsruberts.github.io/lifetimes-CLV/ also great additional code. derivation here: http://www.brucehardie.com/notes/025/gamma_gamma.pdf Output of ggf fitter: p = the 'alpha' param in the gamma dist: E(Z|p, v) = p/v. Alpha adds upon convolution. q = the alpha param in the gamma dist of v -- v is gamma(q, gam) in the pop v = the 'beta' param in gamma dist. constant upon convolution. -- Note that v varies among customers (ie, is gamma distributed) cond_exp_avg_profit => gives prediction of mean trxn value. 2790 customers Total spend: Total time units (here, days) with purchase: Mean monetary value (over all purchase days), roughly equal to estimated v model says 486; empirical average is 477. 
review, per documentation: r, alpha = shape, scale for gamma dist that represents sum (convolution) of purchase rates a = alpha param for beta dist of churn b = beta param for beta dist of churn r, alpha, n lec31: other models time from lasttime to now time from firsttime to now count of transactions by customer now let's merge the 3 customer-level datasets together. pd.concat uses indexes as the join keys, Lec 35 Xgboost lec36: Use xgboost to model the count of transactions per customer Split x, x, y, y | train, test; give test frac and random state vector of predicted squared errors https://towardsdatascience.com/interpretable-machine-learning-with-xgboost-9ec80d148d27 Some global measures of xgboost feature importance: weight: number of times feature is used to split data (over all trees) cover: weight, weighted by data points being touched by those splits gain: mean training loss reduction (reduction in test-train) when the feature is used. argsort here returns the indices of the (reverse-sorted) feature importance values. Useful for grabbing index values and then working with arbitrarily zipped other lists (as I did above) using lightgbm: | 3,036 | en | 0.850282 |
# Copyright 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import fcntl
import hashlib
import os
import shutil
import subprocess
import sys
# Allow use of this module even if termcolor is missing. There are many
# standalone python scripts in build_tools that can be run directly without
# PYTHONPATH set (i.e. not via build/python_wrapper that adds this path.
# TODO(sbc): we should probably just assume that all the module dependencies
# are present.
try:
import termcolor
except ImportError:
termcolor = None
from webports import error, paths
# Google Cloud Storage locations used for fetching prebuilt packages and
# mirrored upstream source tarballs.
GS_URL = 'http://storage.googleapis.com/'
GS_BUCKET = 'webports'
GS_MIRROR_URL = '%s%s/mirror' % (GS_URL, GS_BUCKET)
# Require the latest version of the NaCl SDK. webports is built
# and tested against the pepper_canary release. To build against older
# versions of the SDK use the one of the pepper_XX branches (or use
# --skip-sdk-version-check).
MIN_SDK_VERSION = 49
# Maps a machine architecture name to the spelling used in package file names.
arch_to_pkgarch = {
  'x86_64': 'x86-64',
  'i686': 'i686',
  'arm': 'arm',
  'pnacl': 'pnacl',
  'emscripten': 'emscripten',
  'le32': 'le32'
}
# Inverse of arch_to_pkgarch
pkgarch_to_arch = {v: k for k, v in arch_to_pkgarch.items()}
# Log verbosity levels; log() drops messages whose verbosity exceeds the
# module-level log_level below.
LOG_ERROR = 0
LOG_WARN = 1
LOG_INFO = 2
LOG_VERBOSE = 3
LOG_TRACE = 4
# Magic-number prefixes used by is_elf_file()/is_pexe_file() to sniff
# NaCl executables.
# NOTE(review): these are str literals and the sniffers read files in text
# mode — fine under Python 2, but would need bytes under Python 3; confirm
# the supported interpreter version.
ELF_MAGIC = '\x7fELF'
PEXE_MAGIC = 'PEXE'
# Current log verbosity (see set_log_level) and color mode ('auto' enables
# color only when stdout is a TTY; see check_stdout_for_color_support).
log_level = LOG_INFO
color_mode = 'auto'
def colorize(message, color):
  """Return *message* wrapped in terminal color codes, when enabled.

  Falls back to the plain message when the termcolor module is not
  installed or colorized output has not been enabled.
  """
  # Use getattr with a default: check_stdout_for_color_support() only sets
  # colorize.enabled when color_mode == 'auto', so a bare attribute access
  # could raise AttributeError in any other mode.
  if termcolor and getattr(colorize, 'enabled', False):
    return termcolor.colored(message, color)
  else:
    return message
def check_stdout_for_color_support():
  """In 'auto' color mode, enable colorized output iff stdout is a TTY."""
  if color_mode != 'auto':
    return
  colorize.enabled = sys.stdout.isatty()
def is_elf_file(filename):
  """Return True if *filename* is a regular file starting with the ELF magic.

  Symlinks are never considered ELF files.
  """
  if os.path.islink(filename):
    return False
  with open(filename) as f:
    return f.read(4) == ELF_MAGIC
def is_pexe_file(filename):
  """Return True if *filename* is a regular file starting with the PEXE magic.

  Symlinks are never considered pexe files.
  """
  if os.path.islink(filename):
    return False
  with open(filename) as f:
    return f.read(4) == PEXE_MAGIC
def memoize(f):
  """Memoization decorator for functions taking one or more arguments."""

  class Memo(dict):
    """dict subclass that lazily computes missing entries by calling *f*."""

    def __init__(self, func):
      super(Memo, self).__init__()
      self.f = func

    def __call__(self, *args):
      # A plain dict lookup; __missing__ fills the cache on the first call.
      return self[args]

    def __missing__(self, key):
      value = self.f(*key)
      self[key] = value
      return value

  return Memo(f)
def set_verbose(enabled):
  """Switch logging verbosity between LOG_VERBOSE and LOG_INFO."""
  set_log_level(LOG_VERBOSE if enabled else LOG_INFO)
def set_log_level(verbosity):
  """Set the module-wide logging threshold (one of the LOG_* constants)."""
  global log_level
  log_level = verbosity
def log(message, verbosity=LOG_INFO):
  """Log a message to the console (stdout).

  Messages whose verbosity exceeds the current log_level are dropped.
  """
  if verbosity <= log_level:
    sys.stdout.write('%s\n' % (message,))
    sys.stdout.flush()
def log_heading(message, suffix=''):
  """Log a colored/highlighted message with optional suffix."""
  # getattr guards against colorize.enabled never having been set, which
  # happens whenever color_mode != 'auto' (check_stdout_for_color_support
  # only assigns the attribute in 'auto' mode).
  if getattr(colorize, 'enabled', False):
    log(colorize(message, 'green') + suffix)
    return
  if log_level <= LOG_WARN:
    log(message + suffix)
    return
  # When running in verbose mode make sure headings stand out.
  log('###################################################################')
  log(message + suffix)
  log('###################################################################')
def warn(message):
  """Log *message* at LOG_WARN level, prefixed with 'warning: '."""
  log('warning: ' + message, LOG_WARN)
def trace(message):
  """Log *message* at LOG_TRACE level."""
  log(message, LOG_TRACE)
def log_verbose(message):
  """Log *message* at LOG_VERBOSE level."""
  log(message, LOG_VERBOSE)
def find_in_path(command_name):
  """Search user's PATH for a given executable.

  Returns:
    Full path to executable.

  Raises:
    error.Error: if the command cannot be found.
  """
  # On Windows, try the usual executable extensions when none was given.
  if os.name == 'nt' and not os.path.splitext(command_name)[1]:
    extensions = ('.bat', '.com', '.exe')
  else:
    extensions = ('',)
  for directory in os.environ.get('PATH', '').split(os.pathsep):
    for ext in extensions:
      candidate = os.path.join(directory, command_name + ext)
      if os.path.isfile(candidate):
        return candidate
  raise error.Error('command not found: %s' % command_name)
def download_file(filename, url):
  """Download a file from a given URL.

  Args:
    filename: the name of the file to download the URL to.
    url: the URL to fetch.

  Raises:
    error.Error: if curl is not on PATH or exits with a non-zero status.
  """
  # Download to a temporary name and rename at the very end so an
  # interrupted download never leaves a truncated file at *filename*.
  temp_filename = filename + '.partial'
  # Ensure curl is in user's PATH
  find_in_path('curl')
  curl_cmd = ['curl', '--fail', '--location', '--stderr', '-', '-o',
              temp_filename]
  if hasattr(sys.stdout, 'fileno') and os.isatty(sys.stdout.fileno()):
    # Add --progress-bar but only if stdout is a TTY device.
    curl_cmd.append('--progress-bar')
  else:
    # otherwise suppress status output, since curl always assumes its
    # talking to a TTY and writes \r and \b characters. But add
    # --show-error so that when curl fails it at least prints something.
    curl_cmd += ['--silent', '--show-error']
  curl_cmd.append(url)
  if log_level > LOG_WARN:
    log('Downloading: %s [%s]' % (url, filename))
  else:
    # In quiet mode, strip the common GS prefix to keep output short.
    log('Downloading: %s' % url.replace(GS_URL, ''))
  try:
    subprocess.check_call(curl_cmd)
  except subprocess.CalledProcessError as e:
    raise error.Error('Error downloading file: %s' % str(e))
  os.rename(temp_filename, filename)
def check_stamp(filename, contents=None):
  """Check whether a stamp file is up-to-date.

  Returns:
    False if the file does not exist, or if *contents* is given and the
    file does not start with it.  True otherwise.
  """
  if not os.path.exists(filename):
    return False
  if contents is None:
    return True
  with open(filename) as f:
    return f.read().startswith(contents)
@memoize
def get_sdk_root():
  """Returns the root of the currently configured Native Client SDK."""
  root = os.environ.get('NACL_SDK_ROOT')
  if root is None:
    # Fall back to an SDK checked out under the out/ directory.
    local_sdk_root = os.path.join(paths.OUT_DIR, 'nacl_sdk')
    if not os.path.exists(local_sdk_root):
      raise error.Error('$NACL_SDK_ROOT not set')
    root = local_sdk_root
  if sys.platform == "cygwin":
    root = root.replace('\\', '/')
  return root
@memoize
def get_emscripten_root():
  """Returns the root of the emscripten toolchain.

  Uses $EMSCRIPTEN when set, otherwise falls back to the copy installed
  under out/emsdk.  Raises error.Error when neither location is usable.
  """
  emscripten = os.environ.get('EMSCRIPTEN')
  if emscripten is None:
    local_root = os.path.join(paths.OUT_DIR, 'emsdk', 'emscripten')
    if os.path.exists(local_root):
      emscripten = local_root
    else:
      raise error.Error('$EMSCRIPTEN not set and %s does not exist.' %
                        local_root)
  if not os.path.isdir(emscripten):
    raise error.Error('$EMSCRIPTEN environment variable does not point'
                      ' to a directory: %s' % emscripten)
  return emscripten
def setup_emscripten():
  """Set up the process environment for building with emscripten.

  Exports EMSCRIPTEN and EM_CONFIG (unless EMSCRIPTEN was already set by
  the caller) and makes sure a node binary can be found on $PATH.
  """
  if 'EMSCRIPTEN' in os.environ:
    return
  local_root = get_emscripten_root()
  os.environ['EMSCRIPTEN'] = local_root
  os.environ['EM_CONFIG'] = os.path.join(
      os.path.dirname(local_root), '.emscripten')
  try:
    find_in_path('node')
  except error.Error:
    # node not on PATH: fall back to the copy installed under out/.
    node_bin = os.path.join(paths.OUT_DIR, 'node', 'bin')
    if not os.path.isdir(node_bin):
      raise error.Error(
          'node not found in path and default path not found: %s' % node_bin)
    os.environ['PATH'] += ':' + node_bin
    # Re-check so we fail early if the fallback copy is unusable.
    find_in_path('node')
@memoize
def get_sdk_version():
  """Returns the version (as a string) of the current SDK."""
  getos = os.path.join(get_sdk_root(), 'tools', 'getos.py')
  # Decode explicitly: under Python 3 check_output returns bytes, which
  # would break int() in check_sdk_version and '%s' formatting elsewhere.
  # Under Python 2 this simply yields unicode, which is equally usable.
  output = subprocess.check_output([getos, '--sdk-version'])
  return output.decode('utf-8').strip()
def check_sdk_version(version):
  """Returns True if the currently configured SDK is 'version' or above.

  Args:
    version: minimum required SDK version (int or numeric string).
  """
  return int(get_sdk_version()) >= int(version)
@memoize
def get_sdk_revision():
  """Returns the revision (as an int) of the currently configured SDK."""
  getos = os.path.join(get_sdk_root(), 'tools', 'getos.py')
  # Decode explicitly: under Python 3 check_output returns bytes and
  # int(bytes) raises TypeError.
  revision = subprocess.check_output([getos, '--sdk-revision'])
  return int(revision.decode('utf-8').strip())
@memoize
def get_platform():
  """Returns the current platform name according to getos.py."""
  getos = os.path.join(get_sdk_root(), 'tools', 'getos.py')
  # Decode so the result is a str under Python 3; callers interpolate it
  # into toolchain directory names (see get_toolchain_root).
  platform = subprocess.check_output([getos]).decode('utf-8').strip()
  return platform
@memoize
def get_toolchain_root(config):
  """Returns the toolchain folder for a given NaCl toolchain."""
  if config.toolchain == 'emscripten':
    return get_emscripten_root()
  platform = get_platform()
  # pnacl and clang-newlib share the pnacl toolchain directory.
  if config.toolchain in ('pnacl', 'clang-newlib'):
    tc_dir = '%s_pnacl' % platform
  else:
    arch_part = {'arm': 'arm', 'i686': 'x86', 'x86_64': 'x86'}[config.arch]
    tc_dir = '%s_%s_%s' % (platform, arch_part, config.libc)
  return os.path.join(get_sdk_root(), 'toolchain', tc_dir)
@memoize
def get_install_root(config):
  """Returns the install location given a build configuration."""
  tc_dir = get_toolchain_root(config)
  if config.toolchain == 'emscripten':
    return os.path.join(tc_dir, 'system', 'local')
  if config.toolchain == 'pnacl':
    sysroot = 'le32-nacl'
  else:
    sysroot = '%s-nacl' % config.arch
  return os.path.join(tc_dir, sysroot, 'usr')
@memoize
def get_install_stamp_root(config):
  """Returns the installation metadata folder for the given configuration."""
  tc_root = get_install_root(config)
  # dpkg-style layout: <install root>/var/lib/npkg/<package>.{info,list}
  return os.path.join(tc_root, 'var', 'lib', 'npkg')
@memoize
def get_strip(config):
  """Returns the path to the strip executable for the given configuration.

  Raises:
    error.Error: if the strip binary does not exist in the toolchain.
  """
  tc_dir = get_toolchain_root(config)
  if config.toolchain == 'pnacl':
    strip = os.path.join(tc_dir, 'bin', 'pnacl-strip')
  else:
    strip = os.path.join(tc_dir, 'bin', '%s-nacl-strip' % config.arch)
  # Raise a real error rather than using assert, which is silently
  # stripped when python runs with -O.
  if not os.path.exists(strip):
    raise error.Error('strip executable not found: %s' % strip)
  return strip
def get_install_stamp(package_name, config):
  """Return the path of the install stamp (.info) file for a package.

  The stamp is written at install time and records metadata about the
  installed package.
  """
  return os.path.join(get_install_stamp_root(config), package_name + '.info')
def get_list_file(package_name, config):
  """Return the path of the installed-file list (.list) for a package.

  This file is written at install time.
  """
  return os.path.join(get_install_stamp_root(config), package_name + '.list')
def is_installed(package_name, config, stamp_content=None):
  """Returns True if the given package is installed."""
  return check_stamp(get_install_stamp(package_name, config), stamp_content)
def check_sdk_root():
  """Check validity of NACL_SDK_ROOT.

  Raises:
    error.Error: if the SDK root is missing, malformed, or too old.
  """
  root = get_sdk_root()
  if not os.path.isdir(root):
    raise error.Error('$NACL_SDK_ROOT does not exist: %s' % root)
  # getos.py doubles as a landmark that identifies a real SDK checkout.
  landmark = os.path.join(root, 'tools', 'getos.py')
  if not os.path.exists(landmark):
    raise error.Error("$NACL_SDK_ROOT (%s) doesn't look right. "
                      "Couldn't find landmark file (%s)" % (root, landmark))
  if not check_sdk_version(MIN_SDK_VERSION):
    raise error.Error(
        'This version of webports requires at least version %s of\n'
        'the NaCl SDK. The version in $NACL_SDK_ROOT is %s. If you want\n'
        'to use webports with an older version of the SDK please checkout\n'
        'one of the pepper_XX branches (or run with\n'
        '--skip-sdk-version-check).' % (MIN_SDK_VERSION, get_sdk_version()))
def hash_file(filename):
  """Return the SHA1 (in hex format) of the contents of the given file."""
  block_size = 100 * 1024
  sha1 = hashlib.sha1()
  # Open in binary mode: text mode would corrupt the digest on Windows
  # (newline translation) and yields str rather than bytes on Python 3,
  # which sha1.update() rejects.
  with open(filename, 'rb') as f:
    while True:
      data = f.read(block_size)
      if not data:
        break
      sha1.update(data)
  return sha1.hexdigest()
class HashVerificationError(error.Error):
  """Raised when a file's SHA1 does not match the expected checksum."""
  pass
def verify_hash(filename, sha1):
  """Raise HashVerificationError unless *filename* has the given SHA1.

  Returns None on success.
  """
  actual = hash_file(filename)
  if actual == sha1:
    return
  raise HashVerificationError(
      'verification failed: %s\nExpected: %s\nActual: %s' %
      (filename, sha1, actual))
def remove_tree(directory):
  """Recursively remove a directory and its contents.

  A non-existent directory is a no-op; a path that exists but is not a
  directory raises error.Error.
  """
  if not os.path.exists(directory):
    return
  if not os.path.isdir(directory):
    # %-format the message ourselves: the original passed the directory as
    # a second positional argument, so it was never interpolated.
    raise error.Error('RemoveTree: not a directory: %s' % directory)
  shutil.rmtree(directory)
def rel_path(filename):
  """Return a pathname relative to the root of the webports src tree.

  This is used mostly to make output more readable when printing filenames.
  """
  return os.path.relpath(filename, paths.NACLPORTS_ROOT)
def makedirs(directory):
  """Create *directory* (and any parents), tolerating an existing directory.

  Raises:
    error.Error: when the path exists but is not a directory.
  """
  if not os.path.isdir(directory):
    if os.path.exists(directory):
      raise error.Error('mkdir: File exists and is not a directory: %s' %
                        directory)
    trace("mkdir: %s" % directory)
    os.makedirs(directory)
class DirLock(object):
  """Per-directory flock()-based context manager

  This class will raise an exception if another process already holds the
  lock for the given directory.
  """

  def __init__(self, lock_dir):
    # The lock file lives inside the directory it protects, so make sure
    # the directory exists before opening the file.
    if not os.path.exists(lock_dir):
      makedirs(lock_dir)
    self.file_name = os.path.join(lock_dir, 'webports.lock')
    self.fd = open(self.file_name, 'w')

  def __enter__(self):
    try:
      # LOCK_NB makes the acquisition fail immediately (rather than block)
      # when another process already holds the lock.
      fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except Exception:
      raise error.Error("Unable to acquire lock (%s): Is webports already "
                        "running?" % self.file_name)

  def __exit__(self, exc_type, exc_val, exc_tb):
    # NOTE(review): the lock file is removed before the descriptor is
    # closed; the flock itself is only released by close().  Confirm this
    # ordering is intentional before changing it.
    os.remove(self.file_name)
    self.fd.close()
class BuildLock(DirLock):
  """Lock used when building a package (essentially a lock on OUT_DIR)"""

  def __init__(self):
    super(BuildLock, self).__init__(paths.OUT_DIR)
class InstallLock(DirLock):
  """Lock used when installing/uninstalling package"""

  def __init__(self, config):
    # Locks the install root for the given build configuration.
    root = get_install_root(config)
    super(InstallLock, self).__init__(root)
# Initialize colorize.enabled from the current stdout at import time.
check_stdout_for_color_support()
| lib/webports/util.py | 13,629 | Lock used when building a package (essentially a lock on OUT_DIR)
Per-directory flock()-based context manager
This class will raise an exception if another process already holds the
lock for the given directory.
Lock used when installing/uninstalling package
Check validity of NACL_SDK_ROOT.
Returns True if the currently configured SDK is 'version' or above.
Check that a given stamp file is up-to-date.
Returns: False if the file does not exist, is older than the given
comparison file, or does not contain the given contents. True otherwise.
Download a file from a given URL.
Args:
filename: the name of the file to download the URL to.
url: then URL to fetch.
Search user's PATH for a given executable.
Returns:
Full path to executable.
Returns the install location given a build configuration.
Returns the filename of the install stamp for for a given package.
This file is written at install time and contains metadata
about the installed package.
Returns the installation metadata folder for the given configuration.
Returns the filename of the list of installed files for a given package.
This file is written at install time.
Returns the current platform name according to getos.py.
Returns the revision of the currently configured Native Client SDK.
Returns the root of the currently configured Native Client SDK.
Returns the version (as a string) of the current SDK.
Returns the toolchain folder for a given NaCl toolchain.
Return the SHA1 (in hex format) of the contents of the given file.
Returns True if the given package is installed.
Log a message to the console (stdout).
Log a colored/highlighted message with optional suffix.
Memoization decorator for functions taking one or more arguments.
Return a pathname relative to the root the webports src tree.
This is used mostly to make output more readable when printing filenames.
Recursively remove a directory and its contents.
Return True if the sha1 of the given file match the sha1 passed in.
Copyright 2014 The Native Client Authors. All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. Allow use of this module even if termcolor is missing. There are many standalone python scripts in build_tools that can be run directly without PYTHONPATH set (i.e. not via build/python_wrapper that adds this path. TODO(sbc): we should probably just assume that all the module dependencies are present. Require the latest version of the NaCl SDK. webports is built and tested against the pepper_canary release. To build aginst older versions of the SDK use the one of the pepper_XX branches (or use --skip-sdk-version-check). Inverse of arch_to_pkgarch When running in verbose mode make sure heading standout Ensure curl is in user's PATH Add --progress-bar but only if stdout is a TTY device. otherwise suppress status output, since curl always assumes its talking to a TTY and writes \r and \b characters. But add --show-error so that when curl fails it at least prints something. | 3,026 | en | 0.825113 |
from . import sean_common as common
import torch.nn as nn
import torch
from basicsr.utils.registry import ARCH_REGISTRY
class LFF(nn.Module):
    """Low-frequency feature branch: a shallow conv stack plus upsampling.

    Maps a 3-channel input to an n_colors-channel image at the target
    scale.
    """

    def __init__(self, scale, n_colors, conv=common.default_conv, n_feats=64):
        super(LFF, self).__init__()
        kernel_size = 3
        num_layers = 5

        head = [conv(3, n_feats, kernel_size)]
        body = [conv(n_feats, n_feats, kernel_size) for _ in range(num_layers)]
        tail = [
            common.Upsampler(conv, scale, n_feats, act=False),
            nn.Conv2d(n_feats, n_colors, kernel_size,
                      padding=(kernel_size // 2)),
        ]

        self.LLF_head = nn.Sequential(*head)
        self.LLF_body = nn.Sequential(*body)
        self.LLF_tail = nn.Sequential(*tail)

    def forward(self, x):
        return self.LLF_tail(self.LLF_body(self.LLF_head(x)))
class MSRB(nn.Module):
    """Multi-Scale Residual Block with parallel 3x3 and 5x5 conv paths.

    Two stages of parallel convolutions exchange features by channel
    concatenation; a 1x1 conv fuses them and the input is added back as a
    residual connection.
    """

    def __init__(self, conv=common.default_conv):
        super(MSRB, self).__init__()
        n_feats = 64
        kernel_size_1 = 3
        kernel_size_2 = 5
        self.conv_3_1 = conv(n_feats, n_feats, kernel_size_1)
        self.conv_3_2 = conv(n_feats * 2, n_feats * 2, kernel_size_1)
        self.conv_5_1 = conv(n_feats, n_feats, kernel_size_2)
        self.conv_5_2 = conv(n_feats * 2, n_feats * 2, kernel_size_2)
        self.confusion = nn.Conv2d(n_feats * 4, n_feats, 1, padding=0, stride=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        s3 = self.relu(self.conv_3_1(x))
        s5 = self.relu(self.conv_5_1(x))
        stage1 = torch.cat([s3, s5], 1)
        s3 = self.relu(self.conv_3_2(stage1))
        s5 = self.relu(self.conv_5_2(stage1))
        fused = self.confusion(torch.cat([s3, s5], 1))
        return fused + x
class Edge_Net(nn.Module):
    """Edge (high-frequency) branch built from a chain of MSRB blocks.

    Uses hierarchical feature fusion: the outputs of every MSRB block plus
    the head features are concatenated, reduced by a 1x1 conv, upsampled,
    and projected to an n_colors-channel image at the target scale.
    """

    def __init__(self, scale, n_colors, conv=common.default_conv, n_feats=64):
        super(Edge_Net, self).__init__()
        kernel_size = 3
        n_blocks = 5
        self.n_blocks = n_blocks
        # (The original also created an unused nn.ReLU local; removed.)

        modules_head = [conv(3, n_feats, kernel_size)]

        modules_body = nn.ModuleList()
        for _ in range(n_blocks):
            modules_body.append(MSRB())

        modules_tail = [
            # 1x1 conv fuses the concatenated multi-level features.
            nn.Conv2d(n_feats * (n_blocks + 1), n_feats, 1, padding=0,
                      stride=1),
            conv(n_feats, n_feats, kernel_size),
            common.Upsampler(conv, scale, n_feats, act=False),
            conv(n_feats, n_colors, kernel_size)]

        self.Edge_Net_head = nn.Sequential(*modules_head)
        self.Edge_Net_body = nn.Sequential(*modules_body)
        self.Edge_Net_tail = nn.Sequential(*modules_tail)

    def forward(self, x):
        x = self.Edge_Net_head(x)
        res = x
        # Collect every intermediate MSRB output for hierarchical fusion.
        msrb_out = []
        for i in range(self.n_blocks):
            x = self.Edge_Net_body[i](x)
            msrb_out.append(x)
        msrb_out.append(res)
        return self.Edge_Net_tail(torch.cat(msrb_out, 1))
class Net(nn.Module):
    """Reconstruction trunk: 40 residual blocks between head and tail convs.

    Note: *scale* is accepted for interface compatibility with the caller
    (SEAN) but is not used by this module.
    """

    def __init__(self, scale, res_scale, conv=common.default_conv, n_feats=64):
        super(Net, self).__init__()
        n_resblock = 40
        kernel_size = 3
        act = nn.ReLU(True)

        head = [conv(n_feats, n_feats, kernel_size)]
        body = [
            common.ResBlock(conv, n_feats, kernel_size, act=act,
                            res_scale=res_scale)
            for _ in range(n_resblock)
        ]
        tail = [conv(n_feats, 3, kernel_size)]

        self.Net_head = nn.Sequential(*head)
        self.Net_body = nn.Sequential(*body)
        self.Net_tail = nn.Sequential(*tail)

    def forward(self, x):
        x = self.Net_head(x)
        res = self.Net_body(x) + x
        return self.Net_tail(res)
@ARCH_REGISTRY.register()
class SEAN(nn.Module):
    """SEAN edge-assisted super-resolution network.

    Splits the (mean-shifted) input into a low-frequency branch (LFF) and
    a high-frequency edge branch (Edge_Net), fuses both with a 1x1 conv,
    and refines the result with a deep residual trunk (Net).

    forward() returns a tuple (edge_prediction, restored_image).
    """

    def __init__(self,
                 n_feats,
                 scale,
                 rgb_range,
                 res_scale,
                 n_colors,
                 conv=common.default_conv):
        super(SEAN, self).__init__()
        # (Removed unused locals kernel_size and act from the original.)
        # DIV2K RGB mean used for mean-shift normalization.
        rgb_mean = (0.4488, 0.4371, 0.4040)
        rgb_std = (1.0, 1.0, 1.0)
        self.sub_mean = common.MeanShift(rgb_range, rgb_mean, rgb_std)

        # define head module
        m_LFF = [LFF(scale, n_colors, n_feats=n_feats)]

        # define body module
        m_Edge = [Edge_Net(scale, n_colors, n_feats=n_feats)]

        # Fuses the 3-channel outputs of both branches (6 channels total).
        m_Fushion = [conv(6, n_feats, kernel_size=1)]

        # define tail module
        m_Net = [Net(scale, res_scale, n_feats=n_feats)]

        self.add_mean = common.MeanShift(rgb_range, rgb_mean, rgb_std, 1)
        self.lff = nn.Sequential(*m_LFF)
        self.edge = nn.Sequential(*m_Edge)
        self.fushion = nn.Sequential(*m_Fushion)
        self.net = nn.Sequential(*m_Net)

    def forward(self, x):
        x = self.sub_mean(x)
        low = self.lff(x)
        high = self.edge(x)
        out = torch.cat([low, high], 1)
        out = self.fushion(out)
        out = self.net(out)
        x = self.add_mean(out)
        return high, x
# import torch.nn as nn
# import torch
# from basicsr.utils.registry import ARCH_REGISTRY
#
#
# import math
#
# import torch
# import torch.nn as nn
#
#
# def default_conv(in_channels, out_channels, kernel_size, bias=True):
# return nn.Conv2d(
# in_channels, out_channels, kernel_size,
# padding=(kernel_size//2), bias=bias)
#
# class MeanShift(nn.Conv2d):
# def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1):
# super(MeanShift, self).__init__(3, 3, kernel_size=1)
# std = torch.Tensor(rgb_std)
# self.weight.data = torch.eye(3).view(3, 3, 1, 1)
# self.weight.data.div_(std.view(3, 1, 1, 1))
# self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean)
# self.bias.data.div_(std)
# self.requires_grad = False
#
# class BasicBlock(nn.Sequential):
# def __init__(
# self, in_channels, out_channels, kernel_size, stride=1, bias=False,
# bn=True, act=nn.ReLU(True)):
#
# m = [nn.Conv2d(
# in_channels, out_channels, kernel_size,
# padding=(kernel_size//2), stride=stride, bias=bias)
# ]
# if bn: m.append(nn.BatchNorm2d(out_channels))
# if act is not None: m.append(act)
# super(BasicBlock, self).__init__(*m)
#
# class ResBlock(nn.Module):
# def __init__(
# self, conv, n_feat, kernel_size,
# bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
#
# super(ResBlock, self).__init__()
# m = []
# for i in range(2):
# m.append(conv(n_feat, n_feat, kernel_size, bias=bias))
# if bn: m.append(nn.BatchNorm2d(n_feat))
# if i == 0: m.append(act)
#
# self.body = nn.Sequential(*m)
# self.res_scale = res_scale
#
# def forward(self, x):
# res = self.body(x).mul(self.res_scale)
# res += x
#
# return res
#
# class Upsampler(nn.Sequential):
# def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True):
#
# m = []
# if (scale & (scale - 1)) == 0: # Is scale = 2^n?
# for _ in range(int(math.log(scale, 2))):
# m.append(conv(n_feat, 4 * n_feat, 3, bias))
# m.append(nn.PixelShuffle(2))
# if bn: m.append(nn.BatchNorm2d(n_feat))
# if act: m.append(act())
# elif scale == 3:
# m.append(conv(n_feat, 9 * n_feat, 3, bias))
# m.append(nn.PixelShuffle(3))
# if bn: m.append(nn.BatchNorm2d(n_feat))
# if act: m.append(act())
# else:
# raise NotImplementedError
#
# super(Upsampler, self).__init__(*m)
#
# ## add SELayer
# class SELayer(nn.Module):
# def __init__(self, channel, reduction=16):
# super(SELayer, self).__init__()
# self.avg_pool = nn.AdaptiveAvgPool2d(1)
# self.conv_du = nn.Sequential(
# nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True),
# nn.ReLU(inplace=True),
# nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True),
# nn.Sigmoid()
# )
#
# def forward(self, x):
# y = self.avg_pool(x)
# y = self.conv_du(y)
# return x * y
#
# ## add SEResBlock
# class SEResBlock(nn.Module):
# def __init__(
# self, conv, n_feat, kernel_size, reduction,
# bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
#
# super(SEResBlock, self).__init__()
# modules_body = []
# for i in range(2):
# modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias))
# if bn: modules_body.append(nn.BatchNorm2d(n_feat))
# if i == 0: modules_body.append(act)
# modules_body.append(SELayer(n_feat, reduction))
# self.body = nn.Sequential(*modules_body)
# self.res_scale = res_scale
#
# def forward(self, x):
# res = self.body(x)
# #res = self.body(x).mul(self.res_scale)
# res += x
#
# return res
#
#
# class LFF(nn.Module):
# def __init__(self, scale, n_colors, conv=default_conv, n_feats=64):
# super(LFF, self).__init__()
#
# kernel_size = 3
# n_layes = 5
# act = nn.ReLU(True)
#
# m_head = [conv(3, n_feats, kernel_size)]
#
# m_body = [
# conv(
# n_feats, n_feats, kernel_size
# ) for _ in range(n_layes)
# ]
#
# m_tail = [
# Upsampler(conv, scale, n_feats, act=False),
# nn.Conv2d(
# n_feats, n_colors, kernel_size,
# padding=(kernel_size // 2)
# )
# ]
#
# self.LLF_head = nn.Sequential(*m_head)
# self.LLF_body = nn.Sequential(*m_body)
# self.LLF_tail = nn.Sequential(*m_tail)
#
# def forward(self, x):
# x = self.LLF_head(x)
# x = self.LLF_body(x)
# x = self.LLF_tail(x)
# return x
#
#
# class MSRB(nn.Module):
# def __init__(self, conv=default_conv):
# super(MSRB, self).__init__()
#
# n_feats = 64
# kernel_size_1 = 3
# kernel_size_2 = 5
#
# self.conv_3_1 = conv(n_feats, n_feats, kernel_size_1)
# self.conv_3_2 = conv(n_feats * 2, n_feats * 2, kernel_size_1)
# self.conv_5_1 = conv(n_feats, n_feats, kernel_size_2)
# self.conv_5_2 = conv(n_feats * 2, n_feats * 2, kernel_size_2)
# self.confusion = nn.Conv2d(n_feats * 4, n_feats, 1, padding=0, stride=1)
# self.relu = nn.ReLU(inplace=True)
#
# def forward(self, x):
# input_1 = x
# output_3_1 = self.relu(self.conv_3_1(input_1))
# output_5_1 = self.relu(self.conv_5_1(input_1))
# input_2 = torch.cat([output_3_1, output_5_1], 1)
# output_3_2 = self.relu(self.conv_3_2(input_2))
# output_5_2 = self.relu(self.conv_5_2(input_2))
# input_3 = torch.cat([output_3_2, output_5_2], 1)
# output = self.confusion(input_3)
# output += x
# return output
#
#
# class Edge_Net(nn.Module):
# def __init__(self, scale, n_colors, conv=default_conv, n_feats=64):
# super(Edge_Net, self).__init__()
#
# kernel_size = 3
# act = nn.ReLU(True)
# n_blocks = 5
# self.n_blocks = n_blocks
#
# modules_head = [conv(3, n_feats, kernel_size)]
#
# modules_body = nn.ModuleList()
# for i in range(n_blocks):
# modules_body.append(
# MSRB())
#
# modules_tail = [
# nn.Conv2d(n_feats * (self.n_blocks + 1), n_feats, 1, padding=0, stride=1),
# conv(n_feats, n_feats, kernel_size),
# Upsampler(conv, scale, n_feats, act=False),
# conv(n_feats, n_colors, kernel_size)]
#
# self.Edge_Net_head = nn.Sequential(*modules_head)
# self.Edge_Net_body = nn.Sequential(*modules_body)
# self.Edge_Net_tail = nn.Sequential(*modules_tail)
#
# def forward(self, x):
# x = self.Edge_Net_head(x)
# res = x
#
# MSRB_out = []
# for i in range(self.n_blocks):
# x = self.Edge_Net_body[i](x)
# MSRB_out.append(x)
# MSRB_out.append(res)
#
# res = torch.cat(MSRB_out, 1)
# x = self.Edge_Net_tail(res)
# return x
#
#
# class Net(nn.Module):
# def __init__(self, res_scale, conv=default_conv, n_feats=64):
# super(Net, self).__init__()
#
# n_resblock = 40
# kernel_size = 3
# act = nn.ReLU(True)
#
# m_head = [conv(n_feats, n_feats, kernel_size)]
#
# m_body = [
# ResBlock(
# conv, n_feats, kernel_size, act=act, res_scale=res_scale
# ) for _ in range(n_resblock)
# ]
#
# m_tail = [conv(n_feats, 3, kernel_size)]
#
# self.Net_head = nn.Sequential(*m_head)
# self.Net_body = nn.Sequential(*m_body)
# self.Net_tail = nn.Sequential(*m_tail)
#
# def forward(self, x):
# x = self.Net_head(x)
# res = self.Net_body(x)
# res += x
# x = self.Net_tail(res)
# return x
#
# @ARCH_REGISTRY.register()
# class SEAN(nn.Module):
# def __init__(self,
# n_feats,
# scale,
# n_colors,
# rgb_range,
# res_scale,
# conv=default_conv):
# super(SEAN, self).__init__()
#
# rgb_mean = (0.4488, 0.4371, 0.4040)
# rgb_std = (1.0, 1.0, 1.0)
# self.sub_mean = MeanShift(rgb_range, rgb_mean, rgb_std)
#
# # define head module
# m_LFF = [LFF(scale, n_colors, n_feats=n_feats)]
#
# # define body module
# m_Edge = [Edge_Net(scale, n_colors, n_feats=n_feats)]
#
# m_Fushion = [conv(6, n_feats, kernel_size=1)]
#
# # define tail module
# m_Net = [Net(res_scale, n_feats=n_feats)]
#
# self.add_mean = MeanShift(rgb_range, rgb_mean, rgb_std, 1)
#
# self.lff = nn.Sequential(*m_LFF)
# self.edge = nn.Sequential(*m_Edge)
# self.fushion = nn.Sequential(*m_Fushion)
# self.net = nn.Sequential(*m_Net)
#
# def forward(self, x):
# x = self.sub_mean(x)
# low = self.lff(x)
# high = self.edge(x)
# out = torch.cat([low, high], 1)
# out = self.fushion(out)
# out = self.net(out)
# x = self.add_mean(out)
# return high, x
| basicsr/archs/seanet_arch.py | 14,881 | define head module define body module define tail module import torch.nn as nn import torch from basicsr.utils.registry import ARCH_REGISTRY import math import torch import torch.nn as nn def default_conv(in_channels, out_channels, kernel_size, bias=True): return nn.Conv2d( in_channels, out_channels, kernel_size, padding=(kernel_size//2), bias=bias) class MeanShift(nn.Conv2d): def __init__(self, rgb_range, rgb_mean, rgb_std, sign=-1): super(MeanShift, self).__init__(3, 3, kernel_size=1) std = torch.Tensor(rgb_std) self.weight.data = torch.eye(3).view(3, 3, 1, 1) self.weight.data.div_(std.view(3, 1, 1, 1)) self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) self.bias.data.div_(std) self.requires_grad = False class BasicBlock(nn.Sequential): def __init__( self, in_channels, out_channels, kernel_size, stride=1, bias=False, bn=True, act=nn.ReLU(True)): m = [nn.Conv2d( in_channels, out_channels, kernel_size, padding=(kernel_size//2), stride=stride, bias=bias) ] if bn: m.append(nn.BatchNorm2d(out_channels)) if act is not None: m.append(act) super(BasicBlock, self).__init__(*m) class ResBlock(nn.Module): def __init__( self, conv, n_feat, kernel_size, bias=True, bn=False, act=nn.ReLU(True), res_scale=1): super(ResBlock, self).__init__() m = [] for i in range(2): m.append(conv(n_feat, n_feat, kernel_size, bias=bias)) if bn: m.append(nn.BatchNorm2d(n_feat)) if i == 0: m.append(act) self.body = nn.Sequential(*m) self.res_scale = res_scale def forward(self, x): res = self.body(x).mul(self.res_scale) res += x return res class Upsampler(nn.Sequential): def __init__(self, conv, scale, n_feat, bn=False, act=False, bias=True): m = [] if (scale & (scale - 1)) == 0: Is scale = 2^n? 
for _ in range(int(math.log(scale, 2))): m.append(conv(n_feat, 4 * n_feat, 3, bias)) m.append(nn.PixelShuffle(2)) if bn: m.append(nn.BatchNorm2d(n_feat)) if act: m.append(act()) elif scale == 3: m.append(conv(n_feat, 9 * n_feat, 3, bias)) m.append(nn.PixelShuffle(3)) if bn: m.append(nn.BatchNorm2d(n_feat)) if act: m.append(act()) else: raise NotImplementedError super(Upsampler, self).__init__(*m) add SELayer class SELayer(nn.Module): def __init__(self, channel, reduction=16): super(SELayer, self).__init__() self.avg_pool = nn.AdaptiveAvgPool2d(1) self.conv_du = nn.Sequential( nn.Conv2d(channel, channel // reduction, 1, padding=0, bias=True), nn.ReLU(inplace=True), nn.Conv2d(channel // reduction, channel, 1, padding=0, bias=True), nn.Sigmoid() ) def forward(self, x): y = self.avg_pool(x) y = self.conv_du(y) return x * y add SEResBlock class SEResBlock(nn.Module): def __init__( self, conv, n_feat, kernel_size, reduction, bias=True, bn=False, act=nn.ReLU(True), res_scale=1): super(SEResBlock, self).__init__() modules_body = [] for i in range(2): modules_body.append(conv(n_feat, n_feat, kernel_size, bias=bias)) if bn: modules_body.append(nn.BatchNorm2d(n_feat)) if i == 0: modules_body.append(act) modules_body.append(SELayer(n_feat, reduction)) self.body = nn.Sequential(*modules_body) self.res_scale = res_scale def forward(self, x): res = self.body(x) res = self.body(x).mul(self.res_scale) res += x return res class LFF(nn.Module): def __init__(self, scale, n_colors, conv=default_conv, n_feats=64): super(LFF, self).__init__() kernel_size = 3 n_layes = 5 act = nn.ReLU(True) m_head = [conv(3, n_feats, kernel_size)] m_body = [ conv( n_feats, n_feats, kernel_size ) for _ in range(n_layes) ] m_tail = [ Upsampler(conv, scale, n_feats, act=False), nn.Conv2d( n_feats, n_colors, kernel_size, padding=(kernel_size // 2) ) ] self.LLF_head = nn.Sequential(*m_head) self.LLF_body = nn.Sequential(*m_body) self.LLF_tail = nn.Sequential(*m_tail) def forward(self, x): x = self.LLF_head(x) 
x = self.LLF_body(x) x = self.LLF_tail(x) return x class MSRB(nn.Module): def __init__(self, conv=default_conv): super(MSRB, self).__init__() n_feats = 64 kernel_size_1 = 3 kernel_size_2 = 5 self.conv_3_1 = conv(n_feats, n_feats, kernel_size_1) self.conv_3_2 = conv(n_feats * 2, n_feats * 2, kernel_size_1) self.conv_5_1 = conv(n_feats, n_feats, kernel_size_2) self.conv_5_2 = conv(n_feats * 2, n_feats * 2, kernel_size_2) self.confusion = nn.Conv2d(n_feats * 4, n_feats, 1, padding=0, stride=1) self.relu = nn.ReLU(inplace=True) def forward(self, x): input_1 = x output_3_1 = self.relu(self.conv_3_1(input_1)) output_5_1 = self.relu(self.conv_5_1(input_1)) input_2 = torch.cat([output_3_1, output_5_1], 1) output_3_2 = self.relu(self.conv_3_2(input_2)) output_5_2 = self.relu(self.conv_5_2(input_2)) input_3 = torch.cat([output_3_2, output_5_2], 1) output = self.confusion(input_3) output += x return output class Edge_Net(nn.Module): def __init__(self, scale, n_colors, conv=default_conv, n_feats=64): super(Edge_Net, self).__init__() kernel_size = 3 act = nn.ReLU(True) n_blocks = 5 self.n_blocks = n_blocks modules_head = [conv(3, n_feats, kernel_size)] modules_body = nn.ModuleList() for i in range(n_blocks): modules_body.append( MSRB()) modules_tail = [ nn.Conv2d(n_feats * (self.n_blocks + 1), n_feats, 1, padding=0, stride=1), conv(n_feats, n_feats, kernel_size), Upsampler(conv, scale, n_feats, act=False), conv(n_feats, n_colors, kernel_size)] self.Edge_Net_head = nn.Sequential(*modules_head) self.Edge_Net_body = nn.Sequential(*modules_body) self.Edge_Net_tail = nn.Sequential(*modules_tail) def forward(self, x): x = self.Edge_Net_head(x) res = x MSRB_out = [] for i in range(self.n_blocks): x = self.Edge_Net_body[i](x) MSRB_out.append(x) MSRB_out.append(res) res = torch.cat(MSRB_out, 1) x = self.Edge_Net_tail(res) return x class Net(nn.Module): def __init__(self, res_scale, conv=default_conv, n_feats=64): super(Net, self).__init__() n_resblock = 40 kernel_size = 3 act = 
nn.ReLU(True) m_head = [conv(n_feats, n_feats, kernel_size)] m_body = [ ResBlock( conv, n_feats, kernel_size, act=act, res_scale=res_scale ) for _ in range(n_resblock) ] m_tail = [conv(n_feats, 3, kernel_size)] self.Net_head = nn.Sequential(*m_head) self.Net_body = nn.Sequential(*m_body) self.Net_tail = nn.Sequential(*m_tail) def forward(self, x): x = self.Net_head(x) res = self.Net_body(x) res += x x = self.Net_tail(res) return x @ARCH_REGISTRY.register() class SEAN(nn.Module): def __init__(self, n_feats, scale, n_colors, rgb_range, res_scale, conv=default_conv): super(SEAN, self).__init__() rgb_mean = (0.4488, 0.4371, 0.4040) rgb_std = (1.0, 1.0, 1.0) self.sub_mean = MeanShift(rgb_range, rgb_mean, rgb_std) define head module m_LFF = [LFF(scale, n_colors, n_feats=n_feats)] define body module m_Edge = [Edge_Net(scale, n_colors, n_feats=n_feats)] m_Fushion = [conv(6, n_feats, kernel_size=1)] define tail module m_Net = [Net(res_scale, n_feats=n_feats)] self.add_mean = MeanShift(rgb_range, rgb_mean, rgb_std, 1) self.lff = nn.Sequential(*m_LFF) self.edge = nn.Sequential(*m_Edge) self.fushion = nn.Sequential(*m_Fushion) self.net = nn.Sequential(*m_Net) def forward(self, x): x = self.sub_mean(x) low = self.lff(x) high = self.edge(x) out = torch.cat([low, high], 1) out = self.fushion(out) out = self.net(out) x = self.add_mean(out) return high, x | 8,952 | en | 0.401985 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))

# Default options applied to every autodoc directive.
# NOTE: the list-valued 'autodoc_default_flags' was deprecated in Sphinx 1.8
# and removed in Sphinx 4.0; the dict-valued 'autodoc_default_options'
# is the documented replacement with equivalent behavior.
autodoc_default_options = {
    'members': True,
    'inherited-members': True,
}

# -- Project information -----------------------------------------------------
project = 'Relaxed Lasso'
copyright = '2020 Continental Corporation'  # name required by Sphinx; intentionally shadows the builtin
author = 'Grégory Vial, Flora Estermann'

# The full version, including alpha/beta/rc tags.
# Import must come after the sys.path tweak above so the package is found.
from relaxed_lasso._version import __version__
release = __version__

# -- General configuration ---------------------------------------------------
# Explicitly assign the master document.
master_doc = 'index'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
| docs/source/conf.py | 2,189 | Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: https://www.sphinx-doc.org/en/master/usage/configuration.html -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- Explicitly assign the master document. Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". | 1,570 | en | 0.696663 |
# Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.trainer.fn_args_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from tfx.components.trainer import fn_args_utils
from tfx.proto import trainer_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.types import standard_component_specs
from tfx.utils import proto_utils
class FnArgsUtilsTest(tf.test.TestCase):
  """Tests for fn_args_utils.get_common_fn_args."""

  def testGetCommonFnArgs(self):
    """Checks FnArgs fields are populated from the input dict and exec props."""
    source_data_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)), 'testdata')
    # Create input dict.
    examples = standard_artifacts.Examples()
    examples.uri = os.path.join(source_data_dir,
                                'transform/transformed_examples')
    examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
    transform_output = standard_artifacts.TransformGraph()
    transform_output.uri = os.path.join(source_data_dir,
                                        'transform/transform_graph')
    schema = standard_artifacts.Schema()
    schema.uri = os.path.join(source_data_dir, 'schema_gen')
    base_model = standard_artifacts.Model()
    base_model.uri = os.path.join(source_data_dir, 'trainer/previous')
    input_dict = {
        standard_component_specs.EXAMPLES_KEY: [examples],
        standard_component_specs.TRANSFORM_GRAPH_KEY: [transform_output],
        standard_component_specs.SCHEMA_KEY: [schema],
        standard_component_specs.BASE_MODEL_KEY: [base_model],
    }
    # Create exec properties skeleton.
    exec_properties = {
        'train_args':
            proto_utils.proto_to_json(trainer_pb2.TrainArgs(num_steps=1000)),
        'eval_args':
            proto_utils.proto_to_json(trainer_pb2.EvalArgs(num_steps=500)),
    }
    fn_args = fn_args_utils.get_common_fn_args(input_dict, exec_properties,
                                               'tempdir')
    self.assertEqual(fn_args.working_dir, 'tempdir')
    self.assertEqual(fn_args.train_steps, 1000)
    self.assertEqual(fn_args.eval_steps, 500)
    self.assertLen(fn_args.train_files, 1)
    self.assertEqual(fn_args.train_files[0],
                     os.path.join(examples.uri, 'Split-train', '*'))
    self.assertLen(fn_args.eval_files, 1)
    self.assertEqual(fn_args.eval_files[0],
                     os.path.join(examples.uri, 'Split-eval', '*'))
    self.assertEqual(fn_args.schema_path,
                     os.path.join(schema.uri, 'schema.pbtxt'))
    # Depending on execution environment, the base model may have been stored
    # at .../Format-Servo/... or .../Format-Serving/... directory patterns.
    # assertRegexpMatches is a deprecated alias (removed in Python 3.12);
    # assertRegex is the supported spelling.
    self.assertRegex(
        fn_args.base_model,
        os.path.join(base_model.uri,
                     r'Format-(Servo|Serving)/export/chicago-taxi/\d+'))
    self.assertEqual(fn_args.transform_graph_path, transform_output.uri)
    self.assertIsInstance(fn_args.data_accessor, fn_args_utils.DataAccessor)
# Allow running this test module directly: `python fn_args_utils_test.py`.
if __name__ == '__main__':
  tf.test.main()
| tfx/components/trainer/fn_args_utils_test.py | 3,674 | Tests for tfx.components.trainer.fn_args_utils.
Lint as: python2, python3 Copyright 2019 Google LLC. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Create input dict. Create exec properties skeleton. Depending on execution environment, the base model may have been stored at .../Format-Servo/... or .../Format-Serving/... directory patterns. | 839 | en | 0.805423 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['UserSettings']
# NOTE: generated resource class ("Do not edit by hand" per the file header);
# comments below are for readers, regenerate rather than hand-modify logic.
class UserSettings(pulumi.CustomResource):
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 properties: Optional[pulumi.Input[pulumi.InputType['UserPropertiesArgs']]] = None,
                 user_settings_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Response to get user settings
        API Version: 2018-10-01.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties.
        :param pulumi.Input[str] user_settings_name: The name of the user settings
        """
        # Back-compat shims: __name__/__opts__ are deprecated positional
        # aliases for resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # No opts.id means we are creating a new resource, so inputs are
        # validated and packed into __props__ here.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            if properties is None and not opts.urn:
                raise TypeError("Missing required property 'properties'")
            __props__['properties'] = properties
            __props__['user_settings_name'] = user_settings_name
        # Register older/alternate type tokens so resources created under
        # previous names keep resolving to this class.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:portal:UserSettings"), pulumi.Alias(type_="azure-native:portal/latest:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/latest:UserSettings"), pulumi.Alias(type_="azure-native:portal/v20181001:UserSettings"), pulumi.Alias(type_="azure-nextgen:portal/v20181001:UserSettings")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(UserSettings, __self__).__init__(
            'azure-native:portal:UserSettings',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'UserSettings':
        """
        Get an existing UserSettings resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        # Output properties start unresolved (None); the engine fills them in
        # from the provider's recorded state.
        __props__["properties"] = None
        return UserSettings(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def properties(self) -> pulumi.Output['outputs.UserPropertiesResponse']:
        """
        The cloud shell user settings properties.
        """
        return pulumi.get(self, "properties")

    def translate_output_property(self, prop):
        # Map provider camelCase output names onto Python snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map Python snake_case input names back to provider camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| sdk/python/pulumi_azure_native/portal/user_settings.py | 4,236 | Response to get user settings
API Version: 2018-10-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['UserPropertiesArgs']] properties: The cloud shell user settings properties.
:param pulumi.Input[str] user_settings_name: The name of the user settings
Get an existing UserSettings resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
The cloud shell user settings properties.
coding=utf-8 *** WARNING: this file was generated by the Pulumi SDK Generator. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 904 | en | 0.725692 |
# Copyright 2020. Allen Institute. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import numpy as np
import six
import matplotlib.pyplot as plt
import types
import copy
from functools import partial
from .spike_trains import SpikeTrains
from .spike_trains_api import SpikeTrainsAPI
def __get_spike_trains(spike_trains):
    """Coerce the *spike_trains* argument into a spike-trains object.

    Accepts either a path to a (SONATA) spikes file or an already-built
    SpikeTrains/SpikeTrainsAPI instance; anything else raises AttributeError.
    """
    # A string is treated as a file path and loaded from disk.
    if isinstance(spike_trains, six.string_types):
        return SpikeTrains.load(spike_trains)

    # Already a spike-trains object; hand it back untouched.
    if isinstance(spike_trains, (SpikeTrains, SpikeTrainsAPI)):
        return spike_trains

    raise AttributeError('Could not parse spiketrains. Pass in file-path, SpikeTrains object, or list of the previous')
def __get_population(spike_trains, population):
    """Resolve which population of nodes to display.

    :param spike_trains: object exposing a ``populations`` list.
    :param population: requested population name, or None to auto-select when
        the spikes data holds exactly one population.
    :return: name of the population to use.
    :raises ValueError: if *population* is None while several populations
        exist, or if the requested name is not present. (ValueError subclasses
        Exception, so existing ``except Exception`` callers still work.)
    """
    pops = spike_trains.populations
    if population is None:
        # Auto-select only when the choice is unambiguous.
        if len(pops) > 1:
            raise ValueError('SpikeTrains contains more than one population of nodes. Use "population" parameter '
                             'to specify population to display.')
        return pops[0]
    if population not in pops:
        raise ValueError('Could not find node population "{}" in SpikeTrains, only found {}'.format(population, pops))
    return population
def __get_node_groups(spike_trains, node_groups, population):
    """Parse the 'node_groups' parameter into (groups, selected node ids)."""
    if node_groups is None:
        # No grouping requested: one default group covering every node.
        every_node = spike_trains.node_ids(population=population)
        return [{'node_ids': every_node, 'c': 'b'}], every_node

    # Deep-copy so later mutation of the group dicts never touches the
    # caller's data.
    node_groups = copy.deepcopy(node_groups)
    id_chunks = [np.array(node_groups[0]['node_ids'])]
    for grp in node_groups[1:]:
        if 'node_ids' not in grp:
            raise AttributeError('Could not find "node_ids" key in node_groups parameter.')
        id_chunks.append(np.array(grp['node_ids']))
    return node_groups, np.concatenate(id_chunks)
def plot_raster(spike_trains, with_histogram=True, population=None, node_groups=None, times=None, title=None,
                show=True, save_as=None):
    """Create a raster plot (plus optional histogram) from a SpikeTrains object or SONATA Spike-Trains file. Will
    return the figure
    By default will display all nodes, if you want to only display a subset of nodes and/or group together different
    nodes (by node_id) by dot colors and labels then you can use the node_groups, which should be a list of dicts::
        plot_raster('/path/to/my/spike.h5',
                    node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'},  # first 70 nodes are blue pyr cells
                                 {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}])  # last 30 nodes are red inh cells
    The histogram will not be grouped.
    :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
    :param with_histogram: If True, a histogram will be shown as a small subplot below the scatter plot. Default
        True.
    :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
        nodes to actually plot. If only one population exists and population=None then the function will find it by
        default.
    :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
        be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
        label and color. If None all nodes will be labeled and colored the same.
    :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
        data.
    :param title: str, Use to add a title. Default: no title.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
        save plot.
    :return: matplotlib figure.Figure object
    """
    spike_trains = __get_spike_trains(spike_trains=spike_trains)
    pop = __get_population(spike_trains=spike_trains, population=population)
    node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
    # Only show a legend if one of the node_groups have an explicit label, otherwise matplotlib will show an empty
    # legend box which looks bad
    show_legend = False
    # Situation where if the last (or first) M nodes don't spike matplotlib will cut off the y range, but it should
    # show these as empty rows. To do this need to keep track of range of all node_ids
    min_id, max_id = np.inf, -1
    spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
    spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
    if times is not None:
        min_ts, max_ts = times[0], times[1]
        spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
    else:
        # No explicit window given: derive plot limits from the spiking data.
        min_ts = np.min(spikes_df['timestamps'])
        max_ts = np.max(spikes_df['timestamps'])
    # Lay out the figure: raster on top with an optional histogram strip below.
    if with_histogram:
        fig, axes = plt.subplots(2, 1, gridspec_kw={'height_ratios': [7, 1]}, squeeze=True)
        raster_axes = axes[0]
        bottom_axes = hist_axes = axes[1]
    else:
        fig, axes = plt.subplots(1, 1)
        bottom_axes = raster_axes = axes
        hist_axes = None
    for node_grp in node_groups:
        grp_ids = node_grp.pop('node_ids')  # remaining keys (c, label, ...) are passed straight to scatter()
        grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
        # If label exists for at-least one group we want to show
        show_legend = show_legend or 'label' in node_grp
        # Finds min/max node_id for all node groups
        min_id = np.min([np.min(grp_ids), min_id])
        max_id = np.max([np.max(grp_ids), max_id])
        raster_axes.scatter(grp_spikes['timestamps'], grp_spikes['node_ids'], lw=0, s=8, **node_grp)
    if show_legend:
        raster_axes.legend(loc='upper right')
    if title:
        raster_axes.set_title(title)
    raster_axes.set_ylabel('node_ids')
    raster_axes.set_ylim(min_id - 0.5, max_id + 1)  # add buffering to range else the rows at the ends look cut-off.
    raster_axes.set_xlim(min_ts, max_ts + 1)  # NOTE(review): histogram below uses min_ts - 0.5; confirm the shared time axis is meant to differ
    bottom_axes.set_xlabel('timestamps ({})'.format(spike_trains.units(population=pop)))
    if with_histogram:
        # Add the (ungrouped) time histogram below the raster.
        hist_axes.hist(spikes_df['timestamps'], 100)
        hist_axes.set_xlim(min_ts - 0.5, max_ts + 1)
        hist_axes.axes.get_yaxis().set_visible(False)
        raster_axes.set_xticks([])  # the histogram carries the shared x tick labels
    if save_as:
        plt.savefig(save_as)
    if show:
        plt.show()
    return fig
def moving_average(data, window_size=10):
    """Smooth *data* with a centered moving average.

    For each index, averages the values inside a window of (up to)
    *window_size* samples centered on it, clipped at the sequence ends.
    Returns a list the same length as *data*.
    """
    half = int(window_size / 2)
    n = len(data)
    smoothed = []
    for center in range(n):
        lo = max(0, center - half)
        hi = min(n, center + half)
        smoothed.append(np.mean(data[lo:hi]))
    return smoothed
def plot_rates(spike_trains, population=None, node_groups=None, times=None, smoothing=False,
               smoothing_params=None, title=None, show=True, save_as=None):
    """Calculate and plot the rates of each node in a SpikeTrains object or SONATA Spike-Trains file. If start and stop
    times are not specified from the "times" parameter, will try to parse values from the timestamps data.
    If you want to only display a subset of nodes and/or group together different nodes (by node_id) by dot colors and
    labels then you can use the node_groups, which should be a list of dicts::
        plot_rates('/path/to/my/spike.h5',
                   node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'},
                                {'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}])
    :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
    :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
        nodes to actually plot. If only one population exists and population=None then the function will find it by
        default.
    :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
        be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
        label and color. If None all nodes will be labeled and colored the same.
    :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
        data.
    :param smoothing: Bool or function. Used to smooth the data. By default (False) no smoothing will be done. If True
        will using a moving average smoothing function. Or use a function pointer.
    :param smoothing_params: dict, parameters when using a function pointer smoothing value.
    :param title: str, Use to add a title. Default: no title.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
        save plot.
    :return: matplotlib figure.Figure object
    """
    spike_trains = __get_spike_trains(spike_trains=spike_trains)
    pop = __get_population(spike_trains=spike_trains, population=population)
    node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
    # Determine if smoothing will be applied to the data
    smoothing_params = smoothing_params or {}  # treat None as "no extra smoothing kwargs"
    if isinstance(smoothing, types.FunctionType):
        # Caller supplied their own smoothing function.
        smoothing_fnc = partial(smoothing, **smoothing_params)
    elif smoothing:
        smoothing_fnc = partial(moving_average, **smoothing_params)
    else:
        smoothing_fnc = lambda d: d  # Use a filler function that won't do anything
    # get data
    spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
    spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
    if times is not None:
        recording_interval = times[1] - times[0]
        spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
    else:
        recording_interval = np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])
    # Iterate through each group of nodes and add to the same plot
    fig, axes = plt.subplots()
    show_legend = False  # Only show labels if one of the node group has label value
    for node_grp in node_groups:
        show_legend = show_legend or 'label' in node_grp  # If label exists for at-least one group we want to show
        grp_ids = node_grp.pop('node_ids')  # remaining keys (c, label, ...) are passed straight to plot()
        grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
        # spike count per node divided by interval in seconds -> Hz
        # (assumes timestamps are in milliseconds — TODO confirm against units())
        spike_rates = grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)
        axes.plot(np.array(spike_rates.index), smoothing_fnc(spike_rates), '.', **node_grp)
    axes.set_ylabel('Firing Rates (Hz)')
    axes.set_xlabel('node_ids')
    if show_legend:
        axes.legend()  # loc='upper right')
    if title:
        axes.set_title(title)
    if save_as:
        plt.savefig(save_as)
    if show:
        plt.show()
    return fig
def plot_rates_boxplot(spike_trains, population=None, node_groups=None, times=None, title=None, show=True,
                       save_as=None):
    """Creates a box plot of the firing rates taken from a SpikeTrains object or SONATA Spike-Trains file. If start
    and stop times are not specified from the "times" parameter, will try to parse values from the timestamps data.
    By default will plot all nodes together. To only display a subset of the nodes and/or create groups of nodes use
    the node_groups options::
        plot_rates_boxplot(
            '/path/to/my/spike.h5',
            node_groups=[{'node_ids': range(0, 70), 'label': 'pyr'},
                         {'node_ids': range(70, 100), 'label': 'inh'}]
        )
    :param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
    :param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
        nodes to actually plot. If only one population exists and population=None then the function will find it by
        default.
    :param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
        be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
        label and color. If None all nodes will be labeled and colored the same.
    :param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
        data.
    :param title: str, Use to add a title. Default: no title.
    :param show: bool to display or not display plot. default True.
    :param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
        save plot.
    :return: matplotlib figure.Figure object
    """
    spike_trains = __get_spike_trains(spike_trains=spike_trains)
    pop = __get_population(spike_trains=spike_trains, population=population)
    node_groups, selected_ids = __get_node_groups(spike_trains=spike_trains, node_groups=node_groups, population=pop)
    spikes_df = spike_trains.to_dataframe(population=pop, with_population_col=False)
    spikes_df = spikes_df[spikes_df['node_ids'].isin(selected_ids)]
    if times is not None:
        recording_interval = times[1] - times[0]
        spikes_df = spikes_df[(spikes_df['timestamps'] >= times[0]) & (spikes_df['timestamps'] <= times[1])]
    else:
        recording_interval = np.max(spikes_df['timestamps']) - np.min(spikes_df['timestamps'])
    fig, axes = plt.subplots()
    rates_data = []
    rates_labels = []
    # A single unnamed group gets a friendly default label on the x-axis.
    if len(node_groups) == 1 and 'label' not in node_groups[0]:
        node_groups[0]['label'] = 'All Nodes'
    for i, node_grp in enumerate(node_groups):
        rates_labels.append(node_grp.get('label', 'Node Group {}'.format(i)))
        grp_ids = node_grp.pop('node_ids')
        grp_spikes = spikes_df[spikes_df['node_ids'].isin(grp_ids)]
        # spike count per node divided by interval in seconds -> Hz
        # (assumes timestamps are in milliseconds — TODO confirm against units())
        spike_rates = grp_spikes.groupby('node_ids').size() / (recording_interval / 1000.0)
        rates_data.append(spike_rates)
    axes.boxplot(rates_data)
    axes.set_ylabel('Firing Rates (Hz)')
    axes.set_xticklabels(rates_labels)
    if title:
        axes.set_title(title)
    if save_as:
        plt.savefig(save_as)
    if show:
        plt.show()
return fig | bmtk/utils/reports/spike_trains/plotting.py | 16,245 | Helper function for parsing the 'node_groups' params
Helper function to figure out which population of nodes to use.
Make sure SpikeTrainsAPI object is always returned
will create a raster plot (plus optional histogram) from a SpikeTrains object or SONATA Spike-Trains file. Will
return the figure
By default will display all nodes, if you want to only display a subset of nodes and/or group together different
nodes (by node_id) by dot colors and labels then you can use the node_groups, which should be a list of dicts::
plot_raster('/path/to/my/spike.h5',
node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'}, # first 70 nodes are blue pyr cells
{'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}]) # last 30 nodes are red inh cells
The histogram will not be grouped.
:param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
:param with_histogram: If True, a histogram will be shown as a small subplot below the scatter plot. Default
True.
:param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
nodes to actually plot. If only one population exists and population=None then the function will find it by
default.
:param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
label and color. If None all nodes will be labeled and colored the same.
:param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
data.
:param title: str, Use to add a title. Default: no title
:param show: bool to display or not display plot. default True.
:param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
save plot.
:return: matplotlib figure.Figure object
Calculate and plot the rates of each node in a SpikeTrains object or SONATA Spike-Trains file. If start and stop
times are not specified from the "times" parameter, will try to parse values from the timestamps data.
If you want to only display a subset of nodes and/or group together different nodes (by node_id) by dot colors and
labels then you can use the node_groups, which should be a list of dicts::
plot_rates('/path/to/my/spike.h5',
node_groups=[{'node_ids': range(0, 70), 'c': 'b', 'label': 'pyr'},
{'node_ids': range(70, 100), 'c': 'r', 'label': 'inh'}])
:param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
:param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
nodes to actually plot. If only one population exists and population=None then the function will find it by
default.
:param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
label and color. If None all nodes will be labeled and colored the same.
:param times: (float, float). Used to set start and stop time. If not specified will try to find values from spiking
data.
:param smoothing: Bool or function. Used to smooth the data. By default (False) no smoothing will be done. If True
will using a moving average smoothing function. Or use a function pointer.
:param smoothing_params: dict, parameters when using a function pointer smoothing value.
:param title: str, Use to add a title. Default: no title
:param show: bool to display or not display plot. default True.
:param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
save plot.
:return: matplotlib figure.Figure object
Creates a box plot of the firing rates taken from a SpikeTrains object or SONATA Spike-Trains file. If start
and stop times are not specified from the "times" parameter, will try to parse values from the timestamps data.
By default will plot all nodes together. To only display a subset of the nodes and/or create groups of nodes use
the node_groups options::
plot_rates_boxplot(
'/path/to/my/spike.h5',
node_groups=[{'node_ids': range(0, 70), 'label': 'pyr'},
{'node_ids': range(70, 100), 'label': 'inh'}]
)
:param spike_trains: SpikeTrains object or path to a (SONATA) spikes file.
:param population: string. If a spikes-file contains more than one population of nodes, use this to determine which
nodes to actually plot. If only one population exists and population=None then the function will find it by
default.
:param node_groups: None or list of dicts. Used to group sets of nodes by labels and color. Each grouping should
be a dictionary with a 'node_ids' key with a list of the ids. You can also add 'label' and 'c' keys for
label and color. If None all nodes will be labeled and colored the same.
:param title: str, Use to add a title. Default: no title
:param show: bool to display or not display plot. default True.
:param save_as: None or str: file-name/path to save the plot as a png/jpeg/etc. If None or empty string will not
save plot.
:return: matplotlib figure.Figure object
Copyright 2020. Allen Institute. All rights reserved Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Load spikes from file If only one population exists in spikes object/file select that one If none are specified by user make a 'node_group' consisting of all nodes Fetch all node_ids which can be used to filter the data. 
Make a copy since later we may be altering the dictionary Only show a legend if one of the node_groups have an explicit label, otherwise matplotlib will show an empty legend box which looks bad Situation where if the last (or first) M nodes don't spike matplotlib will cut off the y range, but it should show these as empty rows. To do this need to keep track of range of all node_ids Used to determine If label exists for at-least one group we want to show Finds min/max node_id for all node groups add buffering to range else the rows at the ends look cut-off. Add a histogram if necessarry Determine if smoothing will be applied to the data pass in empty parameters Use a filler function that won't do anything get data Iterate through each group of nodes and add to the same plot Only show labels if one of the node group has label value If label exists for at-least one group we want to show loc='upper right') | 7,980 | en | 0.728804 |
import time
import pygame
def manual_control(**kwargs):
from .knights_archers_zombies import env as _env
env = _env(**kwargs)
env.reset()
done = False
cur_agent = 0
frame_count = 0
# frame_limit = 500
quit_game = 0
while not done:
# while frame_count < frame_limit: # Uncomment this if you want the game to run for fame_limit amount of frames instead of ending by normal game conditions (useful for testing purposes)
agents = env.agents
frame_count += 1
actions = [5 for x in range(len(env.agents))] # If you want to do manual input
# 5 is do nothing, 0 is up, 1 is down, 2 is turn CW, 3 is CCW, 4 is attack
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
quit_game = 1
break
if event.key == pygame.K_BACKSPACE:
# Backspace to reset
env.reset()
# totalReward = 0
if event.key == pygame.K_a:
cur_agent -= 1
if cur_agent < 0:
cur_agent = len(agents) - 1
if event.key == pygame.K_d:
cur_agent += 1
if cur_agent > len(agents) - 1:
cur_agent = 0
if event.key == pygame.K_q:
actions[cur_agent] = 2
if event.key == pygame.K_e:
actions[cur_agent] = 3
if event.key == pygame.K_w:
actions[cur_agent] = 0
if event.key == pygame.K_s:
actions[cur_agent] = 1
if event.key == pygame.K_f:
actions[cur_agent] = 4
if quit_game:
break
for a in actions:
env.step(a)
env.render()
done = any(env.dones.values())
env.close()
| pettingzoo/gamma/knights_archers_zombies/manual_control.py | 1,988 | frame_limit = 500 while frame_count < frame_limit: Uncomment this if you want the game to run for fame_limit amount of frames instead of ending by normal game conditions (useful for testing purposes) If you want to do manual input 5 is do nothing, 0 is up, 1 is down, 2 is turn CW, 3 is CCW, 4 is attack Backspace to reset totalReward = 0 | 339 | en | 0.884702 |
import sys, getopt
from .summarizer import summarize
from .keywords import keywords
# Types of summarization
SENTENCE = 0
WORD = 1
def exit_with_error(err):
print("Error: " + str(err))
usage()
sys.exit(2)
def get_arguments():
try:
opts, args = getopt.getopt(sys.argv[1:], "t:s:r:w:h", ["text=", "summary=", "ratio=", "words=", "help"])
except getopt.GetoptError as err:
exit_with_error(err)
path = None
summarize_by = SENTENCE
ratio = 0.2
words = None
for o, a in opts:
if o in ("-t", "--text"):
path = a
elif o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-s", "--summary"):
summarize_by = int(a)
elif o in ("-w", "--words"):
words = int(a)
elif o in ("-r", "--ratio"):
ratio = float(a)
else:
assert False, "unhandled option"
if path is None:
exit_with_error("-t option is required.")
return path, summarize_by, ratio, words
help_text = """Usage: textrank -t FILE
-s UNIT, --summary=UNIT:
\tType of unit to summarize: sentence (0) or word (1). Default value: 0
\t0: Sentence. 1: Word
-t FILE, --text=FILE:
\tPATH to text to summarize
-r RATIO, --ratio=RATIO:
\tFloat number (0,1] that defines the length of the summary. It's a proportion of the original text. Default value: 0.2.
-w WORDS, --words=WORDS:
\tNumber to limit the length of the summary. The length option is ignored if the word limit is set.
-h, --help:
\tprints this help
"""
def usage():
print(help_text)
def textrank(text, summarize_by=SENTENCE, ratio=0.2, words=None):
if summarize_by == SENTENCE:
return summarize(text, ratio, words)
else:
return keywords(text, ratio, words)
def main():
path, summarize_by, ratio, words = get_arguments()
with open(path) as file:
text = file.read()
print(textrank(text, summarize_by, ratio, words))
if __name__ == "__main__":
main()
| summa/textrank.py | 2,017 | Types of summarization | 22 | en | 0.853054 |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import datetime
import re
import logging
from django.conf import settings
from django.utils import translation, timezone
from django.utils.translation import ugettext_lazy as _
from pipeline.core.flow.activity import Service, StaticIntervalGenerator
from pipeline.core.flow.io import StringItemSchema, ObjectItemSchema
from pipeline.component_framework.component import Component
from gcloud.core.models import Project
__group_name__ = _("蓝鲸服务(BK)")
LOGGER = logging.getLogger("celery")
class PauseService(Service):
__need_schedule__ = True
def execute(self, data, parent_data):
return True
def schedule(self, data, parent_data, callback_data=None):
if callback_data is not None:
data.outputs.callback_data = callback_data
self.finish_schedule()
return True
def inputs_format(self):
return [
self.InputItem(
name=_("描述"), key="description", type="string", schema=StringItemSchema(description=_("描述")),
)
]
def outputs_format(self):
return [
self.OutputItem(
name=_("API回调数据"),
key="callback_data",
type="object",
schema=ObjectItemSchema(description=_("通过node_callback API接口回调并传入数据,支持dict数据"), property_schemas={},),
),
]
class PauseComponent(Component):
name = _("暂停")
code = "pause_node"
bound_service = PauseService
form = settings.STATIC_URL + "components/atoms/bk/pause.js"
desc = _("该节点可以通过node_callback API接口进行回调并传入数据,callback_data参数为dict类型,回调数据会作为该节点的输出数据")
class SleepTimerService(Service):
__need_schedule__ = True
interval = StaticIntervalGenerator(0)
# 匹配年月日 时分秒 正则 yyyy-MM-dd HH:mm:ss
date_regex = re.compile(
r"%s %s"
% (
r"^(((\d{3}[1-9]|\d{2}[1-9]\d{1}|\d{1}[1-9]\d{2}|[1-9]\d{3}))|"
r"(29/02/((\d{2})(0[48]|[2468][048]|[13579][26])|((0[48]|[2468][048]|[3579][26])00))))-"
r"((0[13578]|1[02])-((0[1-9]|[12]\d|3[01]))|"
r"((0[469]|11)-(0[1-9]|[12]\d|30))|(02)-(0[1-9]|[1]\d|2[0-8]))",
r"((0|[1])\d|2[0-3]):(0|[1-5])\d:(0|[1-5])\d$",
)
)
seconds_regex = re.compile(r"^\d{1,8}$")
def inputs_format(self):
return [
self.InputItem(
name=_("定时时间"),
key="bk_timing",
type="string",
schema=StringItemSchema(description=_("定时时间,格式为秒(s) 或 (%%Y-%%m-%%d %%H:%%M:%%S)")),
),
self.InputItem(
name=_("是否强制晚于当前时间"),
key="force_check",
type="bool",
schema=StringItemSchema(description=_("用户输入日期格式时是否强制要求时间晚于当前时间,只对日期格式定时输入有效")),
),
]
def outputs_format(self):
return []
def execute(self, data, parent_data):
if parent_data.get_one_of_inputs("language"):
translation.activate(parent_data.get_one_of_inputs("language"))
timing = data.get_one_of_inputs("bk_timing")
force_check = data.get_one_of_inputs("force_check", True)
# 项目时区获取
project = Project.objects.get(id=parent_data.inputs.project_id)
project_tz = timezone.pytz.timezone(project.time_zone)
data.outputs.business_tz = project_tz
now = datetime.datetime.now(tz=project_tz)
if self.date_regex.match(str(timing)):
eta = project_tz.localize(datetime.datetime.strptime(timing, "%Y-%m-%d %H:%M:%S"))
if force_check and now > eta:
message = _("定时时间需晚于当前时间")
data.set_outputs("ex_data", message)
return False
elif self.seconds_regex.match(str(timing)):
# 如果写成+号 可以输入无限长,或考虑前端修改
eta = now + datetime.timedelta(seconds=int(timing))
else:
message = _("输入参数%s不符合【秒(s) 或 时间(%%Y-%%m-%%d %%H:%%M:%%S)】格式") % timing
data.set_outputs("ex_data", message)
return False
self.logger.info("wake time: {}".format(eta))
data.outputs.timing_time = eta
return True
def schedule(self, data, parent_data, callback_data=None):
timing_time = data.outputs.timing_time
business_tz = data.outputs.business_tz
now = datetime.datetime.now(tz=business_tz)
t_delta = timing_time - now
if t_delta.total_seconds() < 1:
self.finish_schedule()
# 这里减去 0.5s 的目的是尽可能的减去 execute 执行带来的误差
self.interval.interval = t_delta.total_seconds() - 0.5
return True
class SleepTimerComponent(Component):
name = _("定时")
code = "sleep_timer"
bound_service = SleepTimerService
form = settings.STATIC_URL + "components/atoms/bk/timer.js"
desc = _("最长定时时间受到环境配置影响,具体时长请咨询系统管理员")
| pipeline_plugins/components/collections/controller.py | 6,061 | Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-*- coding: utf-8 -*- 匹配年月日 时分秒 正则 yyyy-MM-dd HH:mm:ss 项目时区获取 如果写成+号 可以输入无限长,或考虑前端修改 这里减去 0.5s 的目的是尽可能的减去 execute 执行带来的误差 | 825 | en | 0.696723 |
__version__ = "0.1.0"
import mmap
import os
# from .ext import load_file, parse
#-------------------------------------------------------------------------------
def parse_file(path, **kw_args):
fd = os.open(path, os.O_RDONLY)
try:
map = mmap.mmap(fd, 0, prot=mmap.PROT_READ)
return parse(map, **kw_args)
finally:
os.close(fd)
| src/fixprs/__init__.py | 370 | from .ext import load_file, parse------------------------------------------------------------------------------- | 114 | en | 0.201762 |
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2022 Osyris contributors (https://github.com/nvaytet/osyris)
# flake8: noqa
from .array import Array
from .datagroup import Datagroup
from .dataset import Dataset
from .plot import Plot
from .ism_physics import get_eos, get_opacities
| src/osyris/core/__init__.py | 292 | SPDX-License-Identifier: BSD-3-Clause Copyright (c) 2022 Osyris contributors (https://github.com/nvaytet/osyris) flake8: noqa | 125 | en | 0.395197 |
from filelock import FileLock
import torch
import pandas as pd
from .tools import BuildingTemperatureDataset
import os
def train(model, device, dataloader, optimizer):
"""
Performs one epoch of training.
Order of rooms in building and in data must match otherwise model will fit wrong rooms to data.
"""
model.reset_iv() # Reset initial value
model.train()
model.cooling_policy.eval()
# Stops Autograd endlessly keeping track of the graph. Memory Leak!
for layer in model.cooling_policy.parameters():
layer.requires_grad = False
num_cols = len(model.building.rooms) # number of columns to use from data.
num_batches = len(dataloader)
train_loss = 0
loss_fn = torch.nn.MSELoss()
for batch, (time, temp) in enumerate(dataloader):
time, temp = time.to(device), temp.to(device) # Put on GPU if available
# Get model arguments:
time = time.squeeze(0)
temp = temp.squeeze(0)
# Compute prediction and loss
pred = model(time)
pred = pred.squeeze(-1) # change from column to row matrix
loss = loss_fn(pred[:, 2:], temp[:, 0:num_cols])
train_loss += loss.item()
# get last output and use for next initial value
model.iv = pred[-1, :].unsqueeze(1).detach() # MUST DETACH GRAD
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
return train_loss / num_batches
def test(model, device, dataloader):
model.reset_iv() # Reset initial value
model.eval() # Put model in evaluation mode
num_batches = len(dataloader)
num_cols = len(model.building.rooms) # number of columns to take from data.
test_loss = 0
loss_fn = torch.nn.MSELoss()
with torch.no_grad():
for (time, temp) in dataloader:
time, temp = time.to(device), temp.to(device) # Put on GPU if available
time = time.squeeze(0)
temp = temp.squeeze(0)
pred = model(time)
pred = pred.squeeze(-1) # change from column to row matrix
test_loss += loss_fn(pred[:, 2:], temp[:, 0:num_cols]).item()
# get last output and use for next initial value
model.iv = pred[-1, :].unsqueeze(1).detach() # MUST DETACH GRAD
test_loss /= num_batches
return test_loss
def dataset_creator(path, sample_size, dt):
path_sorted = sort_data(path, dt)
with FileLock(f"{os.path.dirname(os.path.abspath(path_sorted))}.lock"):
training_data = BuildingTemperatureDataset(path_sorted, sample_size, train=True)
train_dataloader = torch.utils.data.DataLoader(training_data, batch_size=1, shuffle=False)
test_data = BuildingTemperatureDataset(path_sorted, sample_size, test=True)
test_dataloader = torch.utils.data.DataLoader(test_data, batch_size=1, shuffle=False)
return train_dataloader, test_dataloader
def sort_data(path, dt):
"""
Check if path has sorted data tag (_sorted)
If not check if data has previously been sorted and exists in the directory.
Check to see if the value dt is correct
If not sort data and write filename_sorted.csv
data is sorted by time in ascending order and downsampled to a frequency of dt seconds.
Missing values are interpolated.
A time-date string is also inserted.
"""
def sort(path, dt):
df = pd.read_csv(path)
if path[-11:] == '_sorted.csv':
path_sorted = path
else:
path_sorted = path[:-4] + '_sorted.csv'
# Sort df by time (raw data not always in order)
df = df.sort_values(by=["time"], ascending=True)
# insert date-time value at start of df
try:
df.insert(loc=0, column='date-time', value=pd.to_datetime(df['time'], unit='ms'))
except ValueError:
raise ValueError('Data appears to have already been sorted. Check if still appropriate and add _sorted.csv tag to avoid this error.')
# downscale data to a frequency of dt (seconds) use the mean value and round to 2dp.
df = df.set_index('date-time').resample(str(dt) + 's').mean().round(2)
# time column is converted to unix epoch seconds to match the date-time
df["time"] = (df.index - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")
# change date-time from UTC to Local time
df = df.tz_localize('Europe/London')
df = df.interpolate().round(2) # interpolate missing values NaN
df.to_csv(path_sorted, index=True)
def need_to_sort(path, dt):
def get_dt(path):
df_dt = pd.read_csv(path)['time'][0:2].values
return df_dt[1] - df_dt[0]
# Does path already have sorted tag?
if path[-11:] == '_sorted.csv':
# if so, is dt correct?
if get_dt(path) == dt:
return False # path and file is correct dont sort
else:
return True # dt is wrong, re-sort
# path does not contain _sorted.csv
else:
# Does path_sorted exist?
path_sorted = path[:-4] + '_sorted.csv'
import os.path
if os.path.isfile(path_sorted): # check if file already exists
# if file exists check if dt is correct
if get_dt(path_sorted) == dt:
return False # correct file already exists don't sort
else:
return True # file exists but dt wrong, re-sort
else: # File doesn't exist
return True
if need_to_sort(path, dt):
sort(path, dt)
# return the path_sorted
if path[-11:] == '_sorted.csv':
path_sorted = path
else:
path_sorted = path[:-4] + '_sorted.csv'
return path_sorted
class OptimiseRC:
"""
Parameters
----------
model : object
RCModel class object.
csv_path : string
Path to .csv file containing room temperature data.
Data will be sorted if not done already and saved to a new file with the tag '_sorted'
sample_size : int
Length of indexes to sample from dataset per batch.
dt : int
Timestep data will be resampled to.
lr : float
Learning rate for optimiser.
model_id : int
Unique identifier used when optimising multiple models.
see https://docs.ray.io/en/latest/using-ray-with-pytorch.html
"""
def __init__(self, model, csv_path, sample_size, dt=30, lr=1e-3, opt_id=0):
self.model = model
self.model.init_params() # randomise parameters
self.model_id = opt_id
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.train_dataloader, self.test_dataloader = dataset_creator(csv_path, int(sample_size), int(dt))
self.optimizer = torch.optim.Adam([self.model.params, self.model.cooling], lr=lr)
def train(self):
avg_loss = train(self.model, self.device, self.train_dataloader, self.optimizer)
return avg_loss
def test(self):
test_loss = test(self.model, self.device, self.test_dataloader)
return test_loss
def train_loop(self, epochs):
print(self.model.params)
for i in range(int(epochs)):
# print(f"Epoch {i + 1}\n-------------------------------")
testloss = self.train()
results = [testloss, self.model]
return results
| src/rcmodel/optimisation.py | 7,497 | Parameters
----------
model : object
RCModel class object.
csv_path : string
Path to .csv file containing room temperature data.
Data will be sorted if not done already and saved to a new file with the tag '_sorted'
sample_size : int
Length of indexes to sample from dataset per batch.
dt : int
Timestep data will be resampled to.
lr : float
Learning rate for optimiser.
model_id : int
Unique identifier used when optimising multiple models.
see https://docs.ray.io/en/latest/using-ray-with-pytorch.html
Check if path has sorted data tag (_sorted)
If not check if data has previously been sorted and exists in the directory.
Check to see if the value dt is correct
If not sort data and write filename_sorted.csv
data is sorted by time in ascending order and downsampled to a frequency of dt seconds.
Missing values are interpolated.
A time-date string is also inserted.
Performs one epoch of training.
Order of rooms in building and in data must match otherwise model will fit wrong rooms to data.
Reset initial value Stops Autograd endlessly keeping track of the graph. Memory Leak! number of columns to use from data. Put on GPU if available Get model arguments: Compute prediction and loss change from column to row matrix get last output and use for next initial value MUST DETACH GRAD Backpropagation Reset initial value Put model in evaluation mode number of columns to take from data. Put on GPU if available change from column to row matrix get last output and use for next initial value MUST DETACH GRAD Sort df by time (raw data not always in order) insert date-time value at start of df downscale data to a frequency of dt (seconds) use the mean value and round to 2dp. time column is converted to unix epoch seconds to match the date-time change date-time from UTC to Local time interpolate missing values NaN Does path already have sorted tag? if so, is dt correct? path and file is correct dont sort dt is wrong, re-sort path does not contain _sorted.csv Does path_sorted exist? check if file already exists if file exists check if dt is correct correct file already exists don't sort file exists but dt wrong, re-sort File doesn't exist return the path_sorted randomise parameters print(f"Epoch {i + 1}\n-------------------------------") | 2,284 | en | 0.806912 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import json
import unittest
from unittest import mock
import pandas as pd
import airflow.providers.amazon.aws.operators.hive_to_dynamodb
from airflow import DAG
from airflow.providers.amazon.aws.hooks.aws_dynamodb_hook import AwsDynamoDBHook
DEFAULT_DATE = datetime.datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
try:
from moto import mock_dynamodb2
except ImportError:
mock_dynamodb2 = None
class TestHiveToDynamoDBTransferOperator(unittest.TestCase):
def setUp(self):
args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
dag = DAG('test_dag_id', default_args=args)
self.dag = dag
self.sql = 'SELECT 1'
self.hook = AwsDynamoDBHook(
aws_conn_id='aws_default', region_name='us-east-1')
@staticmethod
def process_data(data, *args, **kwargs):
return json.loads(data.to_json(orient='records'))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_conn_returns_a_boto3_connection(self):
hook = AwsDynamoDBHook(aws_conn_id='aws_default')
self.assertIsNotNone(hook.get_conn())
@mock.patch('airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_get_records_with_schema(self, mock_get_pandas_df):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'id',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.providers.amazon.aws.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name="test_airflow",
task_id='hive_to_dynamodb_check',
table_keys=['id'],
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter(
'table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
@mock.patch('airflow.providers.apache.hive.hooks.hive.HiveServer2Hook.get_pandas_df',
return_value=pd.DataFrame(data=[('1', 'sid'), ('1', 'gupta')], columns=['id', 'name']))
@unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamodb2 package not present')
@mock_dynamodb2
def test_pre_process_records_with_schema(self, mock_get_pandas_df):
# this table needs to be created in production
self.hook.get_conn().create_table(
TableName='test_airflow',
KeySchema=[
{
'AttributeName': 'id',
'KeyType': 'HASH'
},
],
AttributeDefinitions=[
{
'AttributeName': 'id',
'AttributeType': 'S'
}
],
ProvisionedThroughput={
'ReadCapacityUnits': 10,
'WriteCapacityUnits': 10
}
)
operator = airflow.providers.amazon.aws.operators.hive_to_dynamodb.HiveToDynamoDBTransferOperator(
sql=self.sql,
table_name='test_airflow',
task_id='hive_to_dynamodb_check',
table_keys=['id'],
pre_process=self.process_data,
dag=self.dag)
operator.execute(None)
table = self.hook.get_conn().Table('test_airflow')
table.meta.client.get_waiter('table_exists').wait(TableName='test_airflow')
self.assertEqual(table.item_count, 1)
if __name__ == '__main__':
unittest.main()
| tests/providers/amazon/aws/operators/test_hive_to_dynamodb.py | 5,060 | -*- coding: utf-8 -*- Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. this table needs to be created in production this table needs to be created in production | 864 | en | 0.896673 |
# protoc plugin to map from FileDescriptorProtos to Envoy doc style RST.
# See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto
# for the underlying protos mentioned in this file. See
# https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax.
from collections import defaultdict
import json
import functools
import os
import pathlib
import re
import string
import sys
from google.protobuf import json_format
from bazel_tools.tools.python.runfiles import runfiles
import yaml
# We have to do some evil things to sys.path due to the way that Python module
# resolution works; we have both tools/ trees in bazel_tools and envoy. By
# default, Bazel leaves us with a sys.path in which the @bazel_tools repository
# takes precedence. Now that we're done with importing runfiles above, we can
# just remove it from the sys.path.
sys.path = [p for p in sys.path if not p.endswith('bazel_tools')]
from tools.api_proto_plugin import annotations
from tools.api_proto_plugin import plugin
from tools.api_proto_plugin import visitor
from tools.config_validation import validate_fragment
from tools.protodoc import manifest_pb2
from udpa.annotations import security_pb2
from udpa.annotations import status_pb2
from validate import validate_pb2
# Namespace prefix for Envoy core APIs.
ENVOY_API_NAMESPACE_PREFIX = '.envoy.api.v2.'
# Namespace prefix for Envoy top-level APIs.
ENVOY_PREFIX = '.envoy.'
# Namespace prefix for protobuf well-known types (WKTs), e.g. .google.protobuf.Duration.
WKT_NAMESPACE_PREFIX = '.google.protobuf.'
# Namespace prefix for google.rpc types.
RPC_NAMESPACE_PREFIX = '.google.rpc.'
# U+2063 INVISIBLE SEPARATOR, see http://www.fileformat.info/info/unicode/char/2063/index.htm
UNICODE_INVISIBLE_SEPARATOR = u'\u2063'
# Template for data plane API URLs, pinned to the commit in $ENVOY_BLOB_SHA.
# Interpolated later with (%s = proto file path, %d = line number); see GithubUrl.
DATA_PLANE_API_URL_FMT = 'https://github.com/envoyproxy/envoy/blob/{}/api/%s#L%d'.format(
    os.environ['ENVOY_BLOB_SHA'])
EXTENSION_TEMPLATE = string.Template("""$anchor
This extension may be referenced by the qualified name *$extension*
.. note::
$status
$security_posture
""")
# Template for formating extension's category/ies.
EXTENSION_CATEGORIES_TEMPLATE = string.Template("""
.. tip::
$message:
$categories
""")
# Template for formating an extension category.
EXTENSION_CATEGORY_TEMPLATE = string.Template("""$anchor
.. tip::
This extension category has the following known extensions:
$extensions
""")
# A map from the extension security postures (as defined in the
# envoy_cc_extension build macro) to human readable text for extension docs.
EXTENSION_SECURITY_POSTURES = {
'robust_to_untrusted_downstream':
'This extension is intended to be robust against untrusted downstream traffic. It '
'assumes that the upstream is trusted.',
'robust_to_untrusted_downstream_and_upstream':
'This extension is intended to be robust against both untrusted downstream and '
'upstream traffic.',
'requires_trusted_downstream_and_upstream':
'This extension is not hardened and should only be used in deployments'
' where both the downstream and upstream are trusted.',
'unknown':
'This extension has an unknown security posture and should only be '
'used in deployments where both the downstream and upstream are '
'trusted.',
'data_plane_agnostic':
'This extension does not operate on the data plane and hence is intended to be robust against untrusted traffic.',
}
# A map from the extension status value to a human readable text for extension
# docs.
EXTENSION_STATUS_VALUES = {
'alpha':
'This extension is functional but has not had substantial production burn time, use only with this caveat.',
'wip':
'This extension is work-in-progress. Functionality is incomplete and it is not intended for production use.',
}
EXTENSION_DB = json.loads(pathlib.Path(os.getenv('EXTENSION_DB_PATH')).read_text())

# Build a reverse index mapping each category name to the extensions registered
# under it. Deliberately a plain dict (not a defaultdict): lookups of unknown
# categories must raise KeyError (see FormatExtensionCategory).
EXTENSION_CATEGORIES = {}
for _ext_name, _ext_metadata in EXTENSION_DB.items():
  for _category_name in _ext_metadata['categories']:
    EXTENSION_CATEGORIES.setdefault(_category_name, []).append(_ext_name)
class ProtodocError(Exception):
  """Raised for protodoc-specific failures, e.g. an unknown extension category."""
def HideNotImplemented(comment):
  """Return True if *comment* carries the [#not-implemented-hide:] tag and should be hidden."""
  comment_tags = comment.annotations
  return annotations.NOT_IMPLEMENTED_HIDE_ANNOTATION in comment_tags
def GithubUrl(type_context):
  """Obtain data plane API GitHub URL by path from a TypeContext.

  Args:
    type_context: type_context.TypeContext for node.

  Returns:
    A GitHub URL string for the node's source location, or '' when the
    location is unknown.
  """
  location = type_context.location
  if location is None:
    return ''
  return DATA_PLANE_API_URL_FMT % (type_context.source_code_info.name, location.span[0])
def FormatCommentWithAnnotations(comment, type_name=''):
  """Format a comment string with additional RST for annotations.

  Args:
    comment: comment object carrying .raw text and an .annotations dict.
    type_name: optional, 'message' or 'enum'; retained for interface
      compatibility but unused in this implementation.

  Returns:
    RST string: the annotation-stripped comment text followed by any
    extension / extension-category metadata blocks.
  """
  extension_rst = ''
  if annotations.EXTENSION_ANNOTATION in comment.annotations:
    extension_rst = FormatExtension(comment.annotations[annotations.EXTENSION_ANNOTATION])
  category_rst = ''
  if annotations.EXTENSION_CATEGORY_ANNOTATION in comment.annotations:
    category_rst = FormatExtensionCategory(
        comment.annotations[annotations.EXTENSION_CATEGORY_ANNOTATION])
  stripped = annotations.WithoutAnnotations(StripLeadingSpace(comment.raw) + '\n')
  return stripped + extension_rst + category_rst
def MapLines(f, s):
  """Apply *f* to every line of *s* and re-join the results with newlines.

  Args:
    f: a string transform function for a single line.
    s: a string consisting of potentially multiple lines.

  Returns:
    A flat string with f applied to each line.
  """
  transformed = [f(line) for line in s.split('\n')]
  return '\n'.join(transformed)
def Indent(spaces, line):
  """Return *line* prefixed with *spaces* space characters."""
  return '{}{}'.format(' ' * spaces, line)
def IndentLines(spaces, lines):
  """Indent each line in *lines*; returns a lazy iterator (like map)."""
  prefix = ' ' * spaces
  return map(lambda line: prefix + line, lines)
def FormatInternalLink(text, ref):
  """Render a Sphinx internal cross-reference (:ref:) with display text."""
  return f':ref:`{text} <{ref}>`'
def FormatExternalLink(text, ref):
  """Render an RST external hyperlink with display text."""
  return f'`{text} <{ref}>`_'
def FormatHeader(style, text):
  """Format RST header.

  Args:
    style: underline style, e.g. '=', '-'.
    text: header text.

  Returns:
    RST formatted header: the text, an underline of the same length, and a
    trailing blank line.
  """
  underline = style * len(text)
  return f'{text}\n{underline}\n\n'
def FormatExtension(extension):
  """Format extension metadata as RST.

  Looks *extension* up in EXTENSION_DB and renders its anchor, status and
  security posture, followed by the extension categories it belongs to.

  Args:
    extension: the qualified name of the extension, e.g. com.acme.foo.

  Returns:
    RST formatted extension description.

  Exits the process (status 1) when the extension is missing from
  EXTENSION_DB, since that indicates a build configuration error rather than
  a documentation bug.
  """
  try:
    extension_metadata = EXTENSION_DB[extension]
  except KeyError:
    sys.stderr.write(
        '\n\nDid you forget to add an entry to source/extensions/extensions_build_config.bzl?\n\n')
    # Raising the error would bury the above message in tracebacks.
    sys.exit(1)
  anchor = FormatAnchor('extension_' + extension)
  status = EXTENSION_STATUS_VALUES.get(extension_metadata['status'], '')
  security_posture = EXTENSION_SECURITY_POSTURES[extension_metadata['security_posture']]
  extension_rst = EXTENSION_TEMPLATE.substitute(anchor=anchor,
                                                extension=extension,
                                                status=status,
                                                security_posture=security_posture)
  category_names = extension_metadata["categories"]
  categories = FormatExtensionList(category_names, "extension_category")
  # Pluralize on the number of categories; the previous code measured the
  # length of the *formatted string*, which is effectively always > 1.
  cat_or_cats = "categories" if len(category_names) > 1 else "category"
  category_message = f"This extension extends and can be used with the following extension {cat_or_cats}"
  extension_category = EXTENSION_CATEGORIES_TEMPLATE.substitute(message=category_message,
                                                                categories=categories)
  return f"{extension_rst}\n\n{extension_category}"
def FormatExtensionList(items, prefix="extension", indent=2):
  """Render *items* as an indented RST bullet list of cross-references.

  Args:
    items: iterable of extension (or category) names.
    prefix: anchor prefix for each :ref: target.
    indent: number of spaces each bullet is indented by.

  Returns:
    The bullet list followed by an indented blank line.
  """
  pad = " " * indent
  bullets = [f"{pad}- :ref:`{name} <{prefix}_{name}>`" for name in items]
  return "\n".join(bullets) + f"\n{pad}\n"
def FormatExtensionCategory(extension_category):
  """Format an extension category as RST.

  Args:
    extension_category: the name of the extension category, e.g. com.acme.

  Returns:
    RST formatted extension category description listing the known members.

  Raises:
    ProtodocError: if the category is unknown to EXTENSION_CATEGORIES.
  """
  try:
    members = EXTENSION_CATEGORIES[extension_category]
  except KeyError:
    raise ProtodocError(f"\n\nUnable to find extension category: {extension_category}\n\n")
  anchor = FormatAnchor('extension_category_' + extension_category)
  return EXTENSION_CATEGORY_TEMPLATE.substitute(anchor=anchor,
                                                extensions=FormatExtensionList(sorted(members)))
def FormatHeaderFromFile(style, source_code_info, proto_name):
  """Format RST header based on special file level title.

  Args:
    style: underline style, e.g. '=', '-'.
    source_code_info: SourceCodeInfo object.
    proto_name: fallback page title when the file level comment carries no
      user-specified [#protodoc-title:] annotation.

  Returns:
    RST formatted header, and file level comment without page title strings.
  """
  anchor = FormatAnchor(FileCrossRefLabel(proto_name))
  raw_comment = '\n'.join(c + '\n' for c in source_code_info.file_level_comments)
  stripped_comment = annotations.WithoutAnnotations(StripLeadingSpace(raw_comment))
  file_annotations = source_code_info.file_level_annotations
  # An [#extension:] annotation appends extension metadata below the header.
  formatted_extension = ''
  if annotations.EXTENSION_ANNOTATION in file_annotations:
    formatted_extension = FormatExtension(file_annotations[annotations.EXTENSION_ANNOTATION])
  # Prefer the user-specified page title; fall back to the proto file name.
  title = file_annotations.get(annotations.DOC_TITLE_ANNOTATION, proto_name)
  return anchor + FormatHeader(style, title) + formatted_extension, stripped_comment
def FormatFieldTypeAsJson(type_context, field):
  """Format FieldDescriptorProto.Type as a pseudo-JSON string.

  Args:
    type_context: contextual information for message/enum/field.
    field: FieldDescriptor proto.

  Return: RST formatted pseudo-JSON string representation of field type.
  """
  # Maps render as an object placeholder, repeated fields as an empty list,
  # nested messages as an object and all other types as an opaque scalar.
  if TypeNameFromFQN(field.type_name) in type_context.map_typenames:
    return '"{...}"'
  if field.label == field.LABEL_REPEATED:
    return '[]'
  return '"{...}"' if field.type == field.TYPE_MESSAGE else '"..."'
def FormatMessageAsJson(type_context, msg):
  """Format a message definition DescriptorProto as a pseudo-JSON block.

  Args:
    type_context: contextual information for message/enum/field.
    msg: message definition DescriptorProto.

  Return: RST formatted pseudo-JSON string representation of message definition.
  """
  entries = []
  for index, field in enumerate(msg.field):
    field_type_context = type_context.ExtendField(index, field.name)
    # Fields tagged [#not-implemented-hide:] are omitted from the sketch.
    if HideNotImplemented(field_type_context.leading_comment):
      continue
    entries.append('"%s": %s' % (field.name, FormatFieldTypeAsJson(type_context, field)))
  if not entries:
    return '.. code-block:: json\n\n {}\n\n'
  body = ',\n'.join(IndentLines(4, entries))
  return '.. code-block:: json\n\n {\n' + body + '\n }\n\n'
def NormalizeFieldTypeName(field_fqn):
  """Normalize a fully qualified field type name, e.g. .envoy.foo.bar.

  Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.

  Args:
    field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.

  Return: Normalized type name.
  """
  # Check the API namespace prefix first, matching the historical precedence.
  for prefix in (ENVOY_API_NAMESPACE_PREFIX, ENVOY_PREFIX):
    if field_fqn.startswith(prefix):
      return field_fqn[len(prefix):]
  return field_fqn
def NormalizeTypeContextName(type_name):
  """Normalize a type name, e.g. envoy.foo.bar.

  Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.

  Args:
    type_name: a name from a TypeContext.

  Return: Normalized type name.
  """
  # Qualify first (prepend '.') so the shared field-name normalizer applies.
  fqn = QualifyTypeName(type_name)
  return NormalizeFieldTypeName(fqn)
def QualifyTypeName(type_name):
  """Prefix *type_name* with '.' to form a fully qualified proto name."""
  return f'.{type_name}'
def TypeNameFromFQN(fqn):
  """Drop the leading '.' of a fully qualified proto type name."""
  return fqn[1:]
def FormatEmph(s):
  """RST format a string for emphasis (surround with '*')."""
  return f'*{s}*'
def FormatFieldType(type_context, field):
  """Format a FieldDescriptorProto type description.

  Adds cross-refs for message types.
  TODO(htuch): Add cross-refs for enums as well.

  Args:
    type_context: contextual information for message/enum/field.
    field: FieldDescriptor proto.

  Return: RST formatted field type.
  """
  if field.type_name.startswith(ENVOY_API_NAMESPACE_PREFIX) or field.type_name.startswith(
      ENVOY_PREFIX):
    # Envoy API type: internal cross-reference to the generated docs.
    type_name = NormalizeFieldTypeName(field.type_name)
    if field.type == field.TYPE_MESSAGE:
      # Synthesized map-entry messages render as map<K, V>, with both the key
      # and value types formatted recursively.
      if type_context.map_typenames and TypeNameFromFQN(
          field.type_name) in type_context.map_typenames:
        return 'map<%s, %s>' % tuple(
            map(functools.partial(FormatFieldType, type_context),
                type_context.map_typenames[TypeNameFromFQN(field.type_name)]))
      return FormatInternalLink(type_name, MessageCrossRefLabel(type_name))
    if field.type == field.TYPE_ENUM:
      return FormatInternalLink(type_name, EnumCrossRefLabel(type_name))
  elif field.type_name.startswith(WKT_NAMESPACE_PREFIX):
    # Protobuf well-known type: external link to the protobuf reference docs.
    wkt = field.type_name[len(WKT_NAMESPACE_PREFIX):]
    return FormatExternalLink(
        wkt, 'https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#%s' %
        wkt.lower())
  elif field.type_name.startswith(RPC_NAMESPACE_PREFIX):
    # google.rpc type: external link to the Google Cloud RPC reference.
    rpc = field.type_name[len(RPC_NAMESPACE_PREFIX):]
    return FormatExternalLink(
        rpc,
        'https://cloud.google.com/natural-language/docs/reference/rpc/google.rpc#%s' % rpc.lower())
  elif field.type_name:
    # Any other named type is emitted verbatim with no link.
    return field.type_name
  # Scalar types: link to the shared proto scalar-type table. Several wire
  # types map to the same pretty name (e.g. sfixed32/sint32 -> int32).
  pretty_type_names = {
      field.TYPE_DOUBLE: 'double',
      field.TYPE_FLOAT: 'float',
      field.TYPE_INT32: 'int32',
      field.TYPE_SFIXED32: 'int32',
      field.TYPE_SINT32: 'int32',
      field.TYPE_FIXED32: 'uint32',
      field.TYPE_UINT32: 'uint32',
      field.TYPE_INT64: 'int64',
      field.TYPE_SFIXED64: 'int64',
      field.TYPE_SINT64: 'int64',
      field.TYPE_FIXED64: 'uint64',
      field.TYPE_UINT64: 'uint64',
      field.TYPE_BOOL: 'bool',
      field.TYPE_STRING: 'string',
      field.TYPE_BYTES: 'bytes',
  }
  if field.type in pretty_type_names:
    return FormatExternalLink(pretty_type_names[field.type],
                              'https://developers.google.com/protocol-buffers/docs/proto#scalar')
  raise ProtodocError('Unknown field type ' + str(field.type))
def StripLeadingSpace(s):
  """Remove the single leading character (space) from each line of *s*."""
  def _drop_first_char(line):
    return line[1:]
  return MapLines(_drop_first_char, s)
def FileCrossRefLabel(msg_name):
  """Build the RST anchor label for a proto file."""
  return f'envoy_api_file_{msg_name}'
def MessageCrossRefLabel(msg_name):
  """Build the RST anchor label for a message."""
  return f'envoy_api_msg_{msg_name}'
def EnumCrossRefLabel(enum_name):
  """Build the RST anchor label for an enum."""
  return f'envoy_api_enum_{enum_name}'
def FieldCrossRefLabel(field_name):
  """Build the RST anchor label for a message field."""
  return f'envoy_api_field_{field_name}'
def EnumValueCrossRefLabel(enum_value_name):
  """Build the RST anchor label for an enum value."""
  return f'envoy_api_enum_value_{enum_value_name}'
def FormatAnchor(label):
  """Format a label as an Envoy API RST anchor directive."""
  return f'.. _{label}:\n\n'
def FormatSecurityOptions(security_option, field, type_context, edge_config):
  """Format udpa.annotations security options for a field as an RST attention block.

  Args:
    security_option: security annotation proto from the field options.
    field: FieldDescriptorProto the options belong to.
    type_context: contextual information for the field.
    edge_config: manifest edge configuration (note + example) for the field.

  Returns:
    RST '.. attention::' block with edge-hardening guidance and a validated
    YAML example configuration.
  """
  sections = []
  if security_option.configure_for_untrusted_downstream:
    sections.append(
        Indent(4, 'This field should be configured in the presence of untrusted *downstreams*.'))
  if security_option.configure_for_untrusted_upstream:
    sections.append(
        Indent(4, 'This field should be configured in the presence of untrusted *upstreams*.'))
  if edge_config.note:
    sections.append(Indent(4, edge_config.note))
  # Validate the manifest's example fragment against the field's message type
  # before embedding it, so broken examples fail the doc build.
  example_dict = json_format.MessageToDict(edge_config.example)
  validate_fragment.ValidateFragment(field.type_name[1:], example_dict)
  field_name = type_context.name.split('.')[-1]
  example = {field_name: example_dict}
  sections.append(
      Indent(4, 'Example configuration for untrusted environments:\n\n') +
      Indent(4, '.. code-block:: yaml\n\n') +
      '\n'.join(IndentLines(6,
                            yaml.dump(example).split('\n'))))
  return '.. attention::\n' + '\n\n'.join(sections)
def FormatFieldAsDefinitionListItem(outer_type_context, type_context, field, protodoc_manifest):
  """Format a FieldDescriptorProto as RST definition list item.

  Args:
    outer_type_context: contextual information for enclosing message.
    type_context: contextual information for message/enum/field.
    field: FieldDescriptorProto.
    protodoc_manifest: tools.protodoc.Manifest for proto.

  Returns:
    RST formatted definition list item ('' when the field is hidden).
  """
  field_annotations = []
  anchor = FormatAnchor(FieldCrossRefLabel(NormalizeTypeContextName(type_context.name)))
  # protoc-gen-validate rules that imply the field must be set surface a
  # *REQUIRED* annotation in the docs.
  if field.options.HasExtension(validate_pb2.rules):
    rule = field.options.Extensions[validate_pb2.rules]
    if ((rule.HasField('message') and rule.message.required) or
        (rule.HasField('duration') and rule.duration.required) or
        (rule.HasField('string') and rule.string.min_len > 0) or
        (rule.HasField('string') and rule.string.min_bytes > 0) or
        (rule.HasField('repeated') and rule.repeated.min_items > 0)):
      field_annotations = ['*REQUIRED*']
  leading_comment = type_context.leading_comment
  formatted_leading_comment = FormatCommentWithAnnotations(leading_comment)
  # Fields tagged [#not-implemented-hide:] are dropped entirely.
  if HideNotImplemented(leading_comment):
    return ''
  if field.HasField('oneof_index'):
    oneof_context = outer_type_context.ExtendOneof(field.oneof_index,
                                                   type_context.oneof_names[field.oneof_index])
    oneof_comment = oneof_context.leading_comment
    formatted_oneof_comment = FormatCommentWithAnnotations(oneof_comment)
    # A hidden oneof hides its member fields too.
    if HideNotImplemented(oneof_comment):
      return ''
    # If the oneof only has one field and marked required, mark the field as required.
    if len(type_context.oneof_fields[field.oneof_index]) == 1 and type_context.oneof_required[
        field.oneof_index]:
      field_annotations = ['*REQUIRED*']
    if len(type_context.oneof_fields[field.oneof_index]) > 1:
      # Fields in oneof shouldn't be marked as required when we have oneof comment below it.
      field_annotations = []
      # Cross-link every sibling field in the oneof, with wording that depends
      # on whether the oneof itself is required.
      oneof_template = '\nPrecisely one of %s must be set.\n' if type_context.oneof_required[
          field.oneof_index] else '\nOnly one of %s may be set.\n'
      formatted_oneof_comment += oneof_template % ', '.join(
          FormatInternalLink(
              f,
              FieldCrossRefLabel(NormalizeTypeContextName(
                  outer_type_context.ExtendField(i, f).name)))
          for i, f in type_context.oneof_fields[field.oneof_index])
  else:
    formatted_oneof_comment = ''
  # If there is a udpa.annotations.security option, include it after the comment.
  if field.options.HasExtension(security_pb2.security):
    manifest_description = protodoc_manifest.fields.get(type_context.name)
    if not manifest_description:
      raise ProtodocError('Missing protodoc manifest YAML for %s' % type_context.name)
    formatted_security_options = FormatSecurityOptions(
        field.options.Extensions[security_pb2.security], field, type_context,
        manifest_description.edge_config)
  else:
    formatted_security_options = ''
  pretty_label_names = {
      field.LABEL_OPTIONAL: '',
      field.LABEL_REPEATED: '**repeated** ',
  }
  comment = '(%s) ' % ', '.join(
      [pretty_label_names[field.label] + FormatFieldType(type_context, field)] +
      field_annotations) + formatted_leading_comment
  return anchor + field.name + '\n' + MapLines(functools.partial(
      Indent, 2), comment + formatted_oneof_comment) + formatted_security_options
def FormatMessageAsDefinitionList(type_context, msg, protodoc_manifest):
  """Format a DescriptorProto as RST definition list.

  Args:
    type_context: contextual information for message/enum/field.
    msg: DescriptorProto.
    protodoc_manifest: tools.protodoc.Manifest for proto.

  Returns:
    RST formatted definition list item.
  """
  # Pre-compute oneof membership, requiredness and names on the type context so
  # FormatFieldAsDefinitionListItem can render oneof cross-references.
  type_context.oneof_fields = defaultdict(list)
  type_context.oneof_required = defaultdict(bool)
  type_context.oneof_names = defaultdict(list)
  for index, field in enumerate(msg.field):
    if field.HasField('oneof_index'):
      leading_comment = type_context.ExtendField(index, field.name).leading_comment
      # Hidden fields don't participate in the oneof sibling lists.
      if HideNotImplemented(leading_comment):
        continue
      type_context.oneof_fields[field.oneof_index].append((index, field.name))
  for index, oneof_decl in enumerate(msg.oneof_decl):
    if oneof_decl.options.HasExtension(validate_pb2.required):
      type_context.oneof_required[index] = oneof_decl.options.Extensions[validate_pb2.required]
    type_context.oneof_names[index] = oneof_decl.name
  return '\n'.join(
      FormatFieldAsDefinitionListItem(type_context, type_context.ExtendField(index, field.name),
                                      field, protodoc_manifest)
      for index, field in enumerate(msg.field)) + '\n'
def FormatEnumValueAsDefinitionListItem(type_context, enum_value):
  """Format a EnumValueDescriptorProto as RST definition list item.

  Args:
    type_context: contextual information for message/enum/field.
    enum_value: EnumValueDescriptorProto.

  Returns:
    RST formatted definition list item ('' when hidden).
  """
  leading_comment = type_context.leading_comment
  if HideNotImplemented(leading_comment):
    return ''
  anchor = FormatAnchor(EnumValueCrossRefLabel(NormalizeTypeContextName(type_context.name)))
  # Value 0 is the proto3 default and is marked as such.
  default_marker = '*(DEFAULT)* ' if enum_value.number == 0 else ''
  body = (default_marker + UNICODE_INVISIBLE_SEPARATOR +
          FormatCommentWithAnnotations(leading_comment))
  return anchor + enum_value.name + '\n' + MapLines(functools.partial(Indent, 2), body)
def FormatEnumAsDefinitionList(type_context, enum):
  """Format a EnumDescriptorProto as RST definition list.

  Args:
    type_context: contextual information for message/enum/field.
    enum: DescriptorProto.

  Returns:
    RST formatted definition list item.
  """
  items = [
      FormatEnumValueAsDefinitionListItem(
          type_context.ExtendEnumValue(index, value.name), value)
      for index, value in enumerate(enum.value)
  ]
  return '\n'.join(items) + '\n'
def FormatProtoAsBlockComment(proto):
  """Format a proto as a RST block comment.

  Useful in debugging, not usually referenced.
  """
  indented_proto = MapLines(functools.partial(Indent, 2), str(proto))
  return '\n\nproto::\n\n' + indented_proto + '\n'
class RstFormatVisitor(visitor.Visitor):
  """Visitor to generate a RST representation from a FileDescriptor proto.

  See visitor.Visitor for visitor method docs comments.
  """
  def __init__(self):
    # The protodoc manifest supplies per-field edge-hardening docs (notes and
    # example configs) used when rendering security annotations.
    r = runfiles.Create()
    with open(r.Rlocation('envoy/docs/protodoc_manifest.yaml'), 'r') as f:
      # Load as YAML, emit as JSON and then parse as proto to provide type
      # checking.
      protodoc_manifest_untyped = yaml.safe_load(f.read())
      self.protodoc_manifest = manifest_pb2.Manifest()
      json_format.Parse(json.dumps(protodoc_manifest_untyped), self.protodoc_manifest)
  def VisitEnum(self, enum_proto, type_context):
    # Render anchor, header, GitHub source link, leading comment and the value
    # definition list; enums tagged [#not-implemented-hide:] render as ''.
    normal_enum_type = NormalizeTypeContextName(type_context.name)
    anchor = FormatAnchor(EnumCrossRefLabel(normal_enum_type))
    header = FormatHeader('-', 'Enum %s' % normal_enum_type)
    github_url = GithubUrl(type_context)
    proto_link = FormatExternalLink('[%s proto]' % normal_enum_type, github_url) + '\n\n'
    leading_comment = type_context.leading_comment
    formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'enum')
    if HideNotImplemented(leading_comment):
      return ''
    return anchor + header + proto_link + formatted_leading_comment + FormatEnumAsDefinitionList(
        type_context, enum_proto)
  def VisitMessage(self, msg_proto, type_context, nested_msgs, nested_enums):
    # Skip messages synthesized to represent map types.
    if msg_proto.options.map_entry:
      return ''
    normal_msg_type = NormalizeTypeContextName(type_context.name)
    anchor = FormatAnchor(MessageCrossRefLabel(normal_msg_type))
    header = FormatHeader('-', normal_msg_type)
    github_url = GithubUrl(type_context)
    proto_link = FormatExternalLink('[%s proto]' % normal_msg_type, github_url) + '\n\n'
    leading_comment = type_context.leading_comment
    formatted_leading_comment = FormatCommentWithAnnotations(leading_comment, 'message')
    if HideNotImplemented(leading_comment):
      return ''
    # Message docs: pseudo-JSON sketch, field definition list, then the
    # already-rendered nested messages and enums.
    return anchor + header + proto_link + formatted_leading_comment + FormatMessageAsJson(
        type_context, msg_proto) + FormatMessageAsDefinitionList(
            type_context, msg_proto,
            self.protodoc_manifest) + '\n'.join(nested_msgs) + '\n' + '\n'.join(nested_enums)
  def VisitFile(self, file_proto, type_context, services, msgs, enums):
    has_messages = True
    if all(len(msg) == 0 for msg in msgs) and all(len(enum) == 0 for enum in enums):
      has_messages = False
    # TODO(mattklein123): The logic in both the doc and transform tool around files without messages
    # is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs
    # in the common case.
    if (has_messages and
        not annotations.DOC_TITLE_ANNOTATION in type_context.source_code_info.file_level_annotations
        and file_proto.name.startswith('envoy')):
      raise ProtodocError('Envoy API proto file missing [#protodoc-title:] annotation: {}'.format(
          file_proto.name))
    # Find the earliest detached comment, attribute it to file level.
    # Also extract file level titles if any.
    header, comment = FormatHeaderFromFile('=', type_context.source_code_info, file_proto.name)
    # If there are no messages, we don't include in the doc tree (no support for
    # service rendering yet). We allow these files to be missing from the
    # toctrees.
    if not has_messages:
      header = ':orphan:\n\n' + header
    warnings = ''
    if file_proto.options.HasExtension(status_pb2.file_status):
      if file_proto.options.Extensions[status_pb2.file_status].work_in_progress:
        warnings += ('.. warning::\n This API is work-in-progress and is '
                     'subject to breaking changes.\n\n')
    debug_proto = FormatProtoAsBlockComment(file_proto)
    return header + warnings + comment + '\n'.join(msgs) + '\n'.join(enums)  # + debug_proto
def Main():
  """Run protodoc as a protoc plugin, emitting one .rst file per input proto."""
  descriptor = plugin.DirectOutputDescriptor('.rst', RstFormatVisitor)
  plugin.Plugin([descriptor])
# Entry point when invoked directly by protoc as a plugin executable.
if __name__ == '__main__':
  Main()
| tools/protodoc/protodoc.py | 27,312 | Base error class for the protodoc module.
Visitor to generate a RST representation from a FileDescriptor proto.
See visitor.Visitor for visitor method docs comments.
Enum cross reference label.
Enum value cross reference label.
Field cross reference label.
File cross reference label.
Format a label as an Envoy API RST anchor.
Format a comment string with additional RST for annotations.
Args:
comment: comment string.
type_name: optional, 'message' or 'enum' may be specified for additional
message/enum specific annotations.
Returns:
A string with additional RST from annotations.
RST format a string for emphasis.
Format a EnumDescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
enum: DescriptorProto.
Returns:
RST formatted definition list item.
Format a EnumValueDescriptorProto as RST definition list item.
Args:
type_context: contextual information for message/enum/field.
enum_value: EnumValueDescriptorProto.
Returns:
RST formatted definition list item.
Format extension metadata as RST.
Args:
extension: the name of the extension, e.g. com.acme.foo.
Returns:
RST formatted extension description.
Format extension metadata as RST.
Args:
extension_category: the name of the extension_category, e.g. com.acme.
Returns:
RST formatted extension category description.
Format a FieldDescriptorProto as RST definition list item.
Args:
outer_type_context: contextual information for enclosing message.
type_context: contextual information for message/enum/field.
field: FieldDescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item.
Format a FieldDescriptorProto type description.
Adds cross-refs for message types.
TODO(htuch): Add cross-refs for enums as well.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return: RST formatted field type.
Format FieldDescriptorProto.Type as a pseudo-JSON string.
Args:
type_context: contextual information for message/enum/field.
field: FieldDescriptor proto.
Return: RST formatted pseudo-JSON string representation of field type.
Format RST header.
Args:
style: underline style, e.g. '=', '-'.
text: header text
Returns:
RST formatted header.
Format RST header based on special file level title
Args:
style: underline style, e.g. '=', '-'.
source_code_info: SourceCodeInfo object.
proto_name: If the file_level_comment does not contain a user specified
title, use this as page title.
Returns:
RST formatted header, and file level comment without page title strings.
Format a DescriptorProto as RST definition list.
Args:
type_context: contextual information for message/enum/field.
msg: DescriptorProto.
protodoc_manifest: tools.protodoc.Manifest for proto.
Returns:
RST formatted definition list item.
Format a message definition DescriptorProto as a pseudo-JSON block.
Args:
type_context: contextual information for message/enum/field.
msg: message definition DescriptorProto.
Return: RST formatted pseudo-JSON string representation of message definition.
Format a proto as a RST block comment.
Useful in debugging, not usually referenced.
Obtain data plane API Github URL by path from a TypeContext.
Args:
type_context: type_context.TypeContext for node.
Returns:
A string with a corresponding data plane API GitHub Url.
Should a given type_context.Comment be hidden because it is tagged as [#not-implemented-hide:]?
Indent a string.
Indent a list of strings.
Apply a function across each line in a flat string.
Args:
f: A string transform function for a line.
s: A string consisting of potentially multiple lines.
Returns:
A flat string with f applied to each line.
Message cross reference label.
Normalize a fully qualified field type name, e.g.
.envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
field_fqn: a fully qualified type name from FieldDescriptorProto.type_name.
Return: Normalized type name.
Normalize a type name, e.g.
envoy.foo.bar.
Strips leading ENVOY_API_NAMESPACE_PREFIX and ENVOY_PREFIX.
Args:
type_name: a name from a TypeContext.
Return: Normalized type name.
Remove leading space in flat comment strings.
protoc plugin to map from FileDescriptorProtos to Envoy doc style RST. See https://github.com/google/protobuf/blob/master/src/google/protobuf/descriptor.proto for the underlying protos mentioned in this file. See https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html for Sphinx RST syntax. We have to do some evil things to sys.path due to the way that Python module resolution works; we have both tools/ trees in bazel_tools and envoy. By default, Bazel leaves us with a sys.path in which the @bazel_tools repository takes precedence. Now that we're done with importing runfiles above, we can just remove it from the sys.path. Namespace prefix for Envoy core APIs. Namespace prefix for Envoy top-level APIs. Namespace prefix for WKTs. Namespace prefix for RPCs. http://www.fileformat.info/info/unicode/char/2063/index.htm Template for data plane API URLs. Template for formating extension descriptions. Template for formating extension's category/ies. Template for formating an extension category. A map from the extension security postures (as defined in the envoy_cc_extension build macro) to human readable text for extension docs. A map from the extension status value to a human readable text for extension docs. create an index of extension categories from extension db Raising the error buries the above message in tracebacks. If the oneof only has one field and marked required, mark the field as required. Fields in oneof shouldn't be marked as required when we have oneof comment below it. If there is a udpa.annotations.security option, include it after the comment. Load as YAML, emit as JSON and then parse as proto to provide type checking. Skip messages synthesized to represent map types. TODO(mattklein123): The logic in both the doc and transform tool around files without messages is confusing and should be cleaned up. This is a stop gap to have titles for all proto docs in the common case. Find the earliest detached comment, attribute it to file level. 
Also extract file level titles if any. If there are no messages, we don't include in the doc tree (no support for service rendering yet). We allow these files to be missing from the toctrees. + debug_proto | 6,495 | en | 0.613065 |
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from . import _utilities
import typing
# Export this package's modules as members:
from .foo import *
from .provider import *
_utilities.register(
resource_modules="""
[
{
"pkg": "example",
"mod": "",
"fqn": "pulumi_example",
"classes": {
"example::Foo": "Foo"
}
}
]
""",
resource_packages="""
[
{
"pkg": "example",
"token": "pulumi:providers:example",
"fqn": "pulumi_example",
"class": "Provider"
}
]
"""
)
| pkg/codegen/testing/test/testdata/simple-methods-schema-single-value-returns/python/pulumi_example/__init__.py | 590 | coding=utf-8 *** WARNING: this file was generated by test. *** *** Do not edit by hand unless you're certain you know what you are doing! *** Export this package's modules as members: | 183 | en | 0.966705 |
from django import forms
from django.contrib.auth.models import User
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404, HttpResponseForbidden, HttpResponse
from django.shortcuts import render
from django.views.generic import TemplateView, View
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic.list import ListView
import json
from .models import Profile, Friendship, Message
from .forms import ProfileRegisterForm
from registration.models import RegistrationProfile
#message-create template (reference only):
#def form_valid(self, form):
#    user = self.request.user
#    form.instance.user = user
#    valid_data = super(ViewName, self).form_valid(form)
#    # add anything that should happen after the message is created
#    return valid_data
# Create your views here.
### CREATE ###
class ProfileRegister(CreateView):
    """Create a user account plus its linked Profile in one step.

    The auth User is created inactive through django-registration (so the
    usual activation-email flow applies); the Profile form data is then
    saved against that new user.
    """
    model = Profile
    form_class = ProfileRegisterForm
    success_url = "/accounts/register/complete/"

    def form_valid(self, form):
        """Create the inactive user, then the profile attached to it."""
        # NOTE(review): site=1 is hard-coded — confirm it matches SITE_ID.
        user = RegistrationProfile.objects.create_inactive_user(username=form.cleaned_data['username'],
                                                                password=form.cleaned_data['password'],
                                                                email=form.cleaned_data['email'],
                                                                site=1)
        # Create the profile and link it to the freshly created user.
        profile = form.save(commit=False)
        profile.user_id = user.id
        profile.save()
        # Persist many-to-many fields deferred by commit=False.
        form.save_m2m()
        return HttpResponseRedirect(self.success_url)

    def get_context_data(self, **kwargs):
        """Add page chrome (site name, title, submit label) to the context."""
        context = super(ProfileRegister, self).get_context_data(**kwargs)
        context["site_name"] = "What's the Craic?"
        context["title"] = "- Add Profile"
        context["submit_btn"] = "Create Account"
        return context
### UPDATE ###
class ProfileUpdate(SuccessMessageMixin, UpdateView):
    """Let a user edit their own Profile; others are redirected away."""
    model = Profile
    #form_class = ProfileRegisterForm#
    fields = ['name', 'picture', 'nui_id', 'staff', 'native', 'learning', 'about']
    # NOTE(review): a bare class-level `widgets` dict is not passed to the
    # ModelForm that UpdateView generates from `fields` — confirm this styling
    # actually takes effect, or move it into a form_class.
    widgets={
        "about":forms.Textarea(attrs={'class':'form-control materialize-textarea'}),
    }
    success_url = "/dashboard/"
    template_name = 'profiles/profile_edit.html'
    success_message = "Profile was updated successfully"
    def dispatch(self, request, *args, **kwargs):
        """Reject edits to profiles that do not belong to the requesting user."""
        user = self.request.user
        obj = self.get_object()
        if obj.user != user:
            messages.error(self.request, 'This profile is not yours.')
            return HttpResponseRedirect(reverse('dashboard'))
        return super(ProfileUpdate, self).dispatch(request, *args, **kwargs)
    def get_context_data(self, **kwargs):
        """Add page chrome (site name, title, submit label) to the context."""
        context = super(ProfileUpdate, self).get_context_data(**kwargs)
        context["site_name"] = "What's the Craic?"
        context["title"] = "- Update Profile"
        context["submit_btn"] = "Update Account"
        return context
class ProfileDetailView(DetailView):
    """Show a profile page annotated with the viewer's friendship state."""
    model = Profile
    def get_context_data(self, **kwargs):
        """Add friendship status between the viewer and the viewed profile.

        NOTE(review): assumes an authenticated user — Profile.objects.get
        raises for anonymous visitors; confirm login protection upstream.
        """
        context = super(ProfileDetailView, self).get_context_data(**kwargs)
        user = self.request.user
        profile = Profile.objects.get(user=self.request.user)
        new_messages_count = profile.get_new_messages_count()
        view_profile = Profile.objects.get(id=self.kwargs['pk'])
        are_friends = profile.are_friends(view_profile)
        waiting = profile.waiting_friendship_approval(view_profile)
        if waiting:
            # `waiting` appears to be the profile that initiated the pending
            # request — confirm against Profile.waiting_friendship_approval.
            if profile == waiting:
                context["waiting"] = "WAITING APPROVAL"
            else:
                context["waiting"] = "ACCEPT REQUEST"
        context["profile"] = profile
        context["are_friends"] = are_friends
        context["new_messages_count"] = new_messages_count
        context["site_name"] = "What's the Craic?"
        context["title"] = "- Add User"
        context["submit_btn"] = "Create Account"
        return context
class FindFriends(SuccessMessageMixin, TemplateView):
    """Friend-discovery page: suggestions, incoming requests, approvals."""
    template_name = "findfriends.html"
    def get_context_data(self, **kwargs):
        """Collect friendship-related querysets for the template."""
        context = super(FindFriends, self).get_context_data(**kwargs)
        # Logged-in user's profile and derived friendship data.
        user = self.request.user
        profile = Profile.objects.get(user=self.request.user)
        possible_friends = Profile.find_friends(profile)
        friendships_requests = profile.find_friendships_requests()
        waiting_approvals = profile.get_waiting_approvals()
        new_messages_count = profile.get_new_messages_count()
        # Template context.
        context["profile"] = profile
        context["possible_friends"] = possible_friends
        context["friendships_requests"] = friendships_requests
        context["waiting_approvals"] = waiting_approvals
        context["new_messages_count"] = new_messages_count
        context["site_name"] = "What's the Craic?"
        context["title"] = ""
        context["submit_btn"] = ""
        return context
class AddFriend(SuccessMessageMixin, View):
    """Create a pending Friendship from the logged-in user to profile `pk`."""
    template_name = "add_friend.html"
    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): dispatch runs for every HTTP method, so a plain GET
        # creates the friendship (no CSRF protection), and repeated visits
        # create duplicate pending requests — confirm this is intended.
        user = self.request.user
        profile = Profile.objects.get(user=self.request.user)
        view_profile = Profile.objects.get(id=self.kwargs['pk'])
        # status=False marks the friendship as requested but not yet accepted.
        friendship = Friendship(from_user=profile, to_user=view_profile, status=False)
        friendship.save()
        messages.info(self.request, 'Friendship requested.')
        return HttpResponseRedirect(reverse('profiles:view', kwargs={'pk':self.kwargs['pk']}))
class AcceptFriendship(SuccessMessageMixin, View):
    """Accept a pending friendship request initiated by profile `pk`."""
    template_name = "add_friend.html"
    def dispatch(self, request, *args, **kwargs):
        # NOTE(review): runs for every HTTP method (a GET mutates state), and
        # Friendship.objects.get raises DoesNotExist when no pending request
        # exists — confirm callers guarantee one.
        user = self.request.user
        profile = Profile.objects.get(user=self.request.user)
        view_profile = Profile.objects.get(id=self.kwargs['pk'])
        friendship = Friendship.objects.get(from_user=view_profile, to_user=profile)
        friendship.status = True
        friendship.save()
        messages.info(self.request, 'Friendship accepted.')
        return HttpResponseRedirect(reverse('profiles:view', kwargs={'pk':self.kwargs['pk']}))
class SendMessage(SuccessMessageMixin, TemplateView):
    """Render the compose form (GET) and create a Message (POST)."""
    template_name = "sendmessage.html"
    def get_context_data(self, **kwargs):
        """Supply sender profile, recipient and unread count to the template."""
        context = super(SendMessage, self).get_context_data(**kwargs)
        user = self.request.user
        profile = Profile.objects.get(user=self.request.user)
        message_for = Profile.objects.get(id=self.kwargs['pk'])
        new_messages_count = profile.get_new_messages_count()
        # Template context.
        context["profile"] = profile
        context["message_for"] = message_for
        context["new_messages_count"] = new_messages_count
        context["site_name"] = "What's the Craic?"
        context["title"] = ""
        context["submit_btn"] = ""
        return context
    def post(self, request, *args, **kwargs):
        """Persist a message from the logged-in profile to profile `pk`.

        NOTE(review): raw POST data is read without a Django form, so the only
        validation is the truthiness check on 'message' — consider a form.
        """
        form = (self.request.POST)
        profile = Profile.objects.get(user=self.request.user)
        message_for = Profile.objects.get(id=self.kwargs['pk'])
        if form['message']:
            message = Message(from_user=profile, to_user=message_for, message=form['message'])
            message.save()
            messages.success(request, 'Message was sent')
        return HttpResponseRedirect(reverse('profiles:sentmessages'))
class SentMessages(SuccessMessageMixin, ListView):
    """Paginated list of messages the logged-in profile has sent."""
    template_name = "sentmessages.html"
    paginate_by = 10
    context_object_name = "sent_messages"
    def get_queryset(self):
        """Return this profile's sent messages, newest first."""
        profile = Profile.objects.get(user=self.request.user)
        return Message.objects.filter(from_user = profile).order_by('-date')
    def get_context_data(self, **kwargs):
        """Add profile, unread count and page chrome to the context."""
        context = super(SentMessages, self).get_context_data(**kwargs)
        user = self.request.user
        profile = Profile.objects.get(user=self.request.user)
        new_messages_count = profile.get_new_messages_count()
        # Template context.
        context["profile"] = profile
        context["new_messages_count"] = new_messages_count
        context["site_name"] = "What's the Craic?"
        context["title"] = ""
        context["submit_btn"] = ""
        return context
class Inbox(SuccessMessageMixin, ListView):
    """Paginated list of messages received by the logged-in profile."""
    template_name = "inbox.html"
    paginate_by = 10
    context_object_name = "inbox_messages"
    def get_queryset(self):
        """Return this profile's received messages, newest first."""
        profile = Profile.objects.get(user=self.request.user)
        return Message.objects.filter(to_user = profile).order_by('-date')
    def get_context_data(self, **kwargs):
        """Add profile, unread count and page chrome to the context."""
        context = super(Inbox, self).get_context_data(**kwargs)
        user = self.request.user
        profile = Profile.objects.get(user=self.request.user)
        new_messages_count = profile.get_new_messages_count()
        # Template context.
        context["profile"] = profile
        context["new_messages_count"] = new_messages_count
        context["site_name"] = "What's the Craic?"
        context["title"] = ""
        context["submit_btn"] = ""
        return context
class VizualizedMessage(SuccessMessageMixin, View):
    """AJAX endpoint that flags a message as visualized (read).

    NOTE(review): overriding ``dispatch`` means *every* HTTP method mutates
    the message; overriding only ``post`` would be safer — confirm callers.
    """

    template_name = "add_friend.html"

    def dispatch(self, request, *args, **kwargs):
        """Mark the message named by POST ``messageId`` as visualized and
        return a small JSON acknowledgement."""
        message = Message.objects.get(id=request.POST.get('messageId'))
        message.visualized = True
        message.save()
        payload = {
            'result': 'Message visualized!',
            'visualized': message.visualized,
        }
        return HttpResponse(json.dumps(payload), content_type="application/json")
# pylint: disable=R,C,W
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Author : Nasir Khan (r0ot h3x49)
Github : https://github.com/r0oth3x49
License : MIT
Copyright (c) 2018-2025 Nasir Khan (r0ot h3x49)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from udemy.compat import time, sys
from udemy.logger import logger
from udemy.extract import Udemy
from udemy.shared import (
UdemyCourse,
UdemyCourses,
UdemyChapters,
UdemyLectures,
UdemyLectureStream,
UdemyLectureAssets,
UdemyLectureSubtitles,
)
class InternUdemyCourses(UdemyCourses, Udemy):
    """Fetches the URLs of every course the account is enrolled in."""

    def __init__(self, *args, **kwargs):
        super(InternUdemyCourses, self).__init__(*args, **kwargs)

    def _fetch_course(self):
        """Log in (cookies when present, otherwise username/password), pull
        the enrolled course URLs into ``self._courses``, then log out.
        Exits the process on a failed login."""
        if self._cookies:
            auth = self._login(cookies=self._cookies)
        else:
            auth = self._login(username=self._username, password=self._password)
        status = auth.get("login")
        if status == "successful":
            logger.info(msg="Logged in successfully.", new_line=True)
            logger.info(msg="Fetching all enrolled course(s) url(s)..")
            self._courses = self._extract_subscribed_courses()
            time.sleep(1)
            logger.success(msg="Fetching all enrolled course(s) url(s).. ")
            self._logout()
        if status == "failed":
            logger.error(msg="Failed to login ..\n")
            sys.exit(0)
class InternUdemyCourse(UdemyCourse, Udemy):
    """Downloads one course's full metadata and materialises its chapters
    as InternUdemyChapter wrappers."""
    def __init__(self, *args, **kwargs):
        # Raw course-info dict filled in by _fetch_course(); "" until then.
        self._info = ""
        super(InternUdemyCourse, self).__init__(*args, **kwargs)
    def _fetch_course(self):
        """Log in, extract the course metadata from ``self._url``, populate
        the id/title/chapter attributes, then log out (credential logins
        only — cookie sessions are kept alive)."""
        if self._have_basic:
            # Metadata already fetched once; never refetch.
            return
        auth = {}
        if not self._cookies:
            auth = self._login(username=self._username, password=self._password)
        if not auth and self._cookies:
            auth = self._login(cookies=self._cookies)
        if auth.get("login") == "successful":
            logger.info(msg="Logged in successfully.", new_line=True)
            logger.info(msg="Downloading course information ..")
            # skip_hls_stream skips extraction of HLS variants when not wanted.
            self._info = self._real_extract(
                self._url, skip_hls_stream=self._skip_hls_stream
            )
            time.sleep(1)
            logger.success(msg="Downloaded course information .. ")
            access_token = self._info["access_token"]
            self._id = self._info["course_id"]
            self._title = self._info["course_title"]
            self._chapters_count = self._info["total_chapters"]
            self._total_lectures = self._info["total_lectures"]
            self._chapters = [
                InternUdemyChapter(z, access_token=access_token)
                for z in self._info["chapters"]
            ]
            logger.info(
                msg="Trying to logout now...",
                new_line=True,
            )
            if not self._cookies:
                # Only credential logins are explicitly logged out.
                self._logout()
            logger.info(
                msg="Logged out successfully.",
                new_line=True,
            )
            self._have_basic = True
        if auth.get("login") == "failed":
            logger.error(msg="Failed to login ..\n")
            sys.exit(0)
class InternUdemyChapter(UdemyChapters):
    """Wraps one chapter dict and builds its lecture wrappers."""

    def __init__(self, chapter, access_token=None):
        super(InternUdemyChapter, self).__init__()
        self._chapter_id = chapter["chapter_id"]
        self._chapter_title = chapter["chapter_title"]
        self._chapter_index = chapter["chapter_index"]
        self._lectures_count = chapter.get("lectures_count", 0)
        if self._lectures_count > 0:
            self._lectures = [
                InternUdemyLecture(z, access_token=access_token)
                for z in chapter["lectures"]
            ]
        else:
            self._lectures = []
class InternUdemyLecture(UdemyLectures):
    """Wraps one lecture dict and lazily builds its stream, asset and
    subtitle wrapper lists."""

    def __init__(self, lectures, access_token=None):
        super(InternUdemyLecture, self).__init__()
        self._access_token = access_token
        self._info = lectures
        self._lecture_id = self._info["lectures_id"]
        self._lecture_title = self._info["lecture_title"]
        self._lecture_index = self._info["lecture_index"]
        self._subtitles_count = self._info.get("subtitle_count", 0)
        self._sources_count = self._info.get("sources_count", 0)
        self._assets_count = self._info.get("assets_count", 0)
        self._extension = self._info.get("extension")
        self._html_content = self._info.get("html_content")
        self._duration = self._info.get("duration")
        if self._duration:
            # Render seconds as MM:SS, or HH:MM:SS once an hour is reached.
            total_secs = int(self._duration)
            hours, remainder = divmod(total_secs, 3600)
            mins, secs = divmod(remainder, 60)
            if hours == 0:
                self._duration = "%02d:%02d" % (mins, secs)
            else:
                self._duration = "%02d:%02d:%02d" % (hours, mins, secs)

    def _process_streams(self):
        """Build stream wrappers ordered by media type, then quality."""
        if self._sources_count > 0:
            streams = [InternUdemyLectureStream(z, self) for z in self._info["sources"]]
        else:
            streams = []
        # Equivalent to a quality sort followed by a stable media-type sort.
        self._streams = sorted(streams, key=lambda s: (s.mediatype, s.quality))

    def _process_assets(self):
        """Build asset wrappers (empty list when the lecture has none)."""
        if self._assets_count > 0:
            self._assets = [InternUdemyLectureAssets(z, self) for z in self._info["assets"]]
        else:
            self._assets = []

    def _process_subtitles(self):
        """Build subtitle wrappers (empty list when the lecture has none)."""
        if self._subtitles_count > 0:
            self._subtitles = [InternUdemyLectureSubtitles(z, self) for z in self._info["subtitles"]]
        else:
            self._subtitles = []
class InternUdemyLectureStream(UdemyLectureStream):
    """One downloadable/streamable rendition of a lecture.

    Exposes resolution, quality (frame height in pixels) and whether the
    source is an HLS playlist rather than a progressive download.
    """

    def __init__(self, sources, parent):
        super(InternUdemyLectureStream, self).__init__(parent)
        self._mediatype = sources.get("type")
        self._extension = sources.get("extension")
        self._token = parent._access_token
        height = sources.get("height", "0")
        width = sources.get("width", "0")
        self._resolution = "%sx%s" % (width, height)
        self._dimension = width, height
        self._quality = int(height)
        # Guard against a missing "type" key: `"hls" in None` would raise
        # TypeError, so treat an absent media type as non-HLS.
        self._is_hls = "hls" in (self._mediatype or "")
        self._url = sources.get("download_url")
class InternUdemyLectureAssets(UdemyLectureAssets):
    """One supplementary file (slides, source archive, ...) of a lecture."""

    def __init__(self, assets, parent):
        super(InternUdemyLectureAssets, self).__init__(parent)
        self._mediatype = assets.get("type")
        self._extension = assets.get("extension")
        # Filenames are prefixed with the zero-padded lecture index so they
        # sort in course order on disk.
        prefix = "{0:03d} ".format(parent._lecture_index)
        # Prefer the human-readable title, but only when it already carries
        # the expected extension; otherwise fall back to the raw filename.
        title = assets.get("title", "") or assets.get("filename")
        # Checking `self._extension` first avoids str.endswith(None) raising
        # TypeError when the asset dict has no "extension" entry.
        if title and self._extension and title.endswith(self._extension):
            self._filename = prefix + title
        else:
            self._filename = prefix + assets.get("filename")
        self._url = assets.get("download_url")
class InternUdemyLectureSubtitles(UdemyLectureSubtitles):
    """One subtitle track (language + download URL) of a lecture."""

    def __init__(self, subtitles, parent):
        super(InternUdemyLectureSubtitles, self).__init__(parent)
        info = subtitles
        self._mediatype = info.get("type")
        self._extension = info.get("extension")
        self._language = info.get("language")
        self._url = info.get("download_url")
| udemy/internal.py | 8,378 | Author : Nasir Khan (r0ot h3x49)
Github : https://github.com/r0oth3x49
License : MIT
Copyright (c) 2018-2025 Nasir Khan (r0ot h3x49)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the
Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR
ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH
THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
pylint: disable=R,C,W!/usr/bin/env python3 -*- coding: utf-8 -*- | 1,229 | en | 0.824612 |
import argparse
import datetime
import time
import torch
from torch import distributed as dist
from torch.nn import DataParallel
from torch.nn.parallel.distributed import DistributedDataParallel
from distillation.tool import DistillationBox
from models import load_ckpt, get_model, save_ckpt, set_bottleneck_transformer
from myutils.common import file_util, yaml_util
from myutils.pytorch import func_util, module_util
from utils import data_util, main_util, misc_util
from models.mimic.base import set_width
from models.slimmable.compute_post_bn import ComputeBN
from torch.nn.modules.batchnorm import _BatchNorm
def freeze_batch_norm_outside_bottleneck(model):
    """Put every batch-norm layer of *model* into eval mode, then switch the
    backbone bottleneck back to train mode so only its statistics update."""
    batch_norms = (m for m in model.modules() if isinstance(m, _BatchNorm))
    for layer in batch_norms:
        layer.eval()
    model.backbone_net.bottleneck.train()
def get_argparser():
    """Build and return the CLI argument parser for the mimic runner."""
    parser = argparse.ArgumentParser(description='Mimic Runner')
    # Core configuration.
    parser.add_argument('--config', required=True, help='yaml file path')
    parser.add_argument('--device', default='cuda', help='device')
    parser.add_argument('--json', help='dictionary to overwrite config')
    # Mode flags.
    parser.add_argument('-distill', action='store_true', help='distill a teacher model')
    parser.add_argument('-skip_teacher_eval', action='store_true', help='skip teacher model evaluation in testing')
    parser.add_argument('-transform_bottleneck', action='store_true',
                        help='use bottleneck transformer (if defined in yaml) in testing')
    parser.add_argument('-post_bn', action='store_true', help='use post traing batch norm calculation')
    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
    return parser
def freeze_modules(student_model, student_model_config):
    """Freeze student parameters per config.

    With 'frozen_modules': freeze exactly the listed submodules.
    With 'unfrozen_modules': freeze the whole model, then unfreeze the
    listed submodules.
    """
    if 'frozen_modules' in student_model_config:
        for path in student_model_config['frozen_modules']:
            target = module_util.get_module(student_model, path)
            module_util.freeze_module_params(target)
    elif 'unfrozen_modules' in student_model_config:
        module_util.freeze_module_params(student_model)
        for path in student_model_config['unfrozen_modules']:
            target = module_util.get_module(student_model, path)
            module_util.unfreeze_module_params(target)
def distill_model(distillation_box, data_loader, optimizer, log_freq, device, epoch):
    """Run one distillation epoch.

    Forwards each batch through *distillation_box*, back-propagates the
    distillation loss and steps *optimizer*, logging loss/lr every
    *log_freq* batches. A linear LR warmup is applied in epoch 0 only.
    """
    metric_logger = misc_util.MetricLogger(delimiter='  ')
    metric_logger.add_meter('lr', misc_util.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    header = 'Epoch: [{}]'.format(epoch)
    lr_scheduler = None
    if epoch == 0:
        # Warm up the learning rate over (at most) the first 1000 iterations.
        warmup_factor = 1.0 / 1000.0
        warmup_iters = min(1000, len(data_loader) - 1)
        lr_scheduler = main_util.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
    for images, targets in metric_logger.log_every(data_loader, log_freq, header):
        # Move every image tensor and every target tensor to the device.
        images = list(image.to(device) for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        optimizer.zero_grad()
        loss = distillation_box(images, targets)
        loss.backward()
        optimizer.step()
        if lr_scheduler is not None:
            lr_scheduler.step()
        metric_logger.update(loss=loss)
        metric_logger.update(lr=optimizer.param_groups[0]['lr'])
        # torch.cuda.empty_cache()
def distill(teacher_model, student_model, train_sampler, train_data_loader, val_data_loader,
            device, distributed, distill_backbone_only, config, args):
    """Set up the distillation box, optimizer and LR scheduler, restore any
    existing student checkpoint, and write the checkpoint back out.

    NOTE(review): no training loop runs here — the function only rebuilds
    optimizer/scheduler state and re-saves the checkpoint; confirm that is
    the intended behaviour of this "fix" script (several parameters,
    e.g. train_data_loader and distill_backbone_only, are unused).
    """
    train_config = config['train']
    student_config = config['student_model']
    distillation_box = DistillationBox(teacher_model, student_model,
                                       train_config['criterion'], student_config)
    ckpt_file_path = config['student_model']['ckpt']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    # Default the best validation mAP so save_ckpt() does not hit a
    # NameError when no checkpoint exists yet.
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)
    save_ckpt(student_model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)
def main(args):
    """Entry point: load the YAML config, initialise (distributed) device
    state, build the teacher/student models, restore any student checkpoint
    and re-save it."""
    config = yaml_util.load_yaml_file(args.config)
    if args.json is not None:
        # CLI JSON overrides take precedence over the YAML file.
        main_util.overwrite_config(config, args.json)
    distributed, device_ids = main_util.init_distributed_mode(args.world_size, args.dist_url)
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    # The teacher is frozen; only the student is (potentially) trainable.
    teacher_model = get_model(config['teacher_model'], device)
    module_util.freeze_module_params(teacher_model)
    student_model_config = config['student_model']
    student_model = get_model(student_model_config, device)
    freeze_modules(student_model, student_model_config)
    ckpt_file_path = config['student_model']['ckpt']
    train_config = config['train']
    optim_config = train_config['optimizer']
    optimizer = func_util.get_optimizer(student_model, optim_config['type'], optim_config['params'])
    scheduler_config = train_config['scheduler']
    lr_scheduler = func_util.get_scheduler(optimizer, scheduler_config['type'], scheduler_config['params'])
    # Default the best validation mAP so save_ckpt() does not hit a
    # NameError when no checkpoint has been written yet.
    best_val_map = 0.0
    if file_util.check_if_exists(ckpt_file_path):
        best_val_map, _, _ = load_ckpt(ckpt_file_path, optimizer=optimizer, lr_scheduler=lr_scheduler)
    save_ckpt(student_model, optimizer, lr_scheduler, best_val_map, config, args, ckpt_file_path)
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run the pipeline.
    parser = get_argparser()
    main(parser.parse_args())
| src/mimic_fix.py | 5,929 | distributed training parameters torch.cuda.empty_cache() | 56 | en | 0.300212 |
#!/usr/bin/env python3
from fairseq import models
from fairseq.data import FairseqDataset, data_utils
from fairseq.models import FairseqMultiModel
from fairseq.tasks.multilingual_translation import MultilingualTranslationTask
from pytorch_translate.data import iterators as ptt_iterators
class PyTorchTranslateMultiTask(MultilingualTranslationTask):
    """Multilingual translation task whose batch iterator applies per-task
    loss weights (via ptt_iterators.WeightedEpochBatchIterator) and which
    requires a FairseqMultiModel architecture."""
    def build_model(self, args):
        """Build the model and reject non-multi architectures."""
        model = models.build_model(args, self)
        if not isinstance(model, FairseqMultiModel):
            raise ValueError(
                "PyTorchTranslateMultiTask requires a FairseqMultiModel architecture"
            )
        return model
    def get_batch_iterator(
        self,
        dataset,
        max_tokens=None,
        max_sentences=None,
        max_positions=None,
        ignore_invalid_inputs=False,
        required_batch_size_multiple=1,
        seed=1,
        num_shards=1,
        shard_id=0,
        num_workers=0,
    ):
        """Return a reusable, sharded epoch iterator over *dataset*.

        Mirrors the stock fairseq batching pipeline (order by size, filter
        oversized examples, batch under token/sentence limits), but wraps
        the result in a WeightedEpochBatchIterator carrying
        ``self.loss_weights``.
        """
        assert isinstance(dataset, FairseqDataset)
        # get indices ordered by example size
        with data_utils.numpy_seed(seed):
            indices = dataset.ordered_indices()
        # filter examples that are too large
        indices = data_utils.filter_by_size(
            indices,
            dataset.size,
            max_positions,
            raise_exception=(not ignore_invalid_inputs),
        )
        # create mini-batches with given size constraints
        batch_sampler = data_utils.batch_by_size(
            indices,
            dataset.num_tokens,
            max_tokens=max_tokens,
            max_sentences=max_sentences,
            required_batch_size_multiple=required_batch_size_multiple,
        )
        # return a reusable, sharded iterator
        return ptt_iterators.WeightedEpochBatchIterator(
            dataset=dataset,
            collate_fn=dataset.collater,
            batch_sampler=batch_sampler,
            seed=seed,
            num_shards=num_shards,
            shard_id=shard_id,
            num_workers=num_workers,
            weights=self.loss_weights,
        )
    def max_positions(self):
        """Return None to allow model to dictate max sentence length allowed"""
        return None
| pytorch_translate/tasks/pytorch_translate_multi_task.py | 2,202 | Return None to allow model to dictate max sentence length allowed
!/usr/bin/env python3 get indices ordered by example size filter examples that are too large create mini-batches with given size constraints return a reusable, sharded iterator | 243 | en | 0.789392 |
#!/usr/bin/python
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
"""
Swift upload image file.
This file contains implementation of gett
swift download URL for the uploaded image file
"""
from builtins import object
from builtins import str
from future import standard_library
standard_library.install_aliases() # noqa
import logging
import re
from threading import RLock
import time
from urllib.parse import urlparse
from ansible.module_utils.fabric_utils import FabricAnsibleModule
import requests
import swiftclient
import swiftclient.utils
DOCUMENTATION = '''
---
module: Swift file util
author: Juniper Networks
short_description: Private module to get swift download url of the image file
description:
- Pass the required swift config info get the download url of image file.
requirements:
-
options:
authtoken:
description:
- authentication token string
required: true
authurl:
description:
- authentication url string
required: true
user:
description:
- Swift username
type: string
required: true
key:
description:
- Swift password
type: string
required: true
tenant_name:
description:
- Tenant name.
type: string
required: false
default: 'admin'
auth_version:
description:
- Keystone Auth version.
required: false
default: '3.0'
temp_url_key:
description:
- Temp url key
required: true
temp_url_key_2:
description:
- Temp url key 2
required: true
connection_retry_count:
description:
- Connection retry count
type: int
required: false
default: 5
chosen_temp_url_key:
description:
- Chosen Temp url key
required: false
default: 'temp_url_key'
container_name:
description:
- Name of the container
required: true
filename:
description:
- Name of the image file
required: true
'''
EXAMPLES = '''
'''
RETURN = '''
url:
description:
- An image file url used to download the file without authentication.
returned: on success always
type: str
error_msg:
description:
- Its an error message that is returned if there is any exception or error.
returned: on failure
type: str
'''
connection_lock = RLock()
class FileSvcUtil(object):  # pragma: no cover
    """Thin wrapper over a Swift connection used to resolve public download
    URLs for image files stored in a container."""

    def __init__(self, authtoken, authurl, user, key, tenant_name,
                 auth_version, container_name, temp_url_key,
                 temp_url_key2, connection_retry_count, chosen_temp_url_key):
        """Init routine: record connection settings, open the Swift
        connection (with retries) and publish the temp-URL keys."""
        self.requests = requests
        self.authurl = authurl
        self.preauthtoken = authtoken
        self.user = user
        self.key = key
        self.auth_version = auth_version
        self.container_name = container_name
        self.temp_url_key = temp_url_key
        self.temp_url_key_2 = temp_url_key2
        self.connection_retry_count = connection_retry_count
        self.chosen_temp_url_key = chosen_temp_url_key
        self.conn_timeout_sec = 10
        self.tenant_name = tenant_name
        self.generateToken()
        self.updateAccount()

    def generateToken(self):
        """Open a Swift connection, retrying with exponential backoff.

        Sets ``self.swift_conn`` and ``self.storageurl`` on success; raises
        after ``connection_retry_count`` failed attempts.
        """
        retry_count = 0
        incr_sleep = 10
        while retry_count <= self.connection_retry_count:
            # Initialise before the try so the finally clause cannot hit a
            # NameError if acquire() itself fails.
            acquired = False
            try:
                acquired = connection_lock.acquire()
                swiftconn = swiftclient.client.Connection(
                    authurl=self.authurl,
                    user=self.user,
                    key=self.key,
                    preauthtoken=self.preauthtoken,
                    tenant_name=self.tenant_name,
                    auth_version=self.auth_version,
                    timeout=self.conn_timeout_sec,
                    insecure=True)
                self.swift_conn = swiftconn
                swiftconn.get_account()
                self.storageurl = swiftconn.url
                break
            except Exception as e:
                retry_count += 1
                # BUG FIX: Exception has no .message attribute on Python 3
                # (an AttributeError here masked the real failure); use
                # str(e), which works on both Python 2 and 3.
                err_msg = str(e)
                logging.error(err_msg)
                if retry_count == self.connection_retry_count:
                    raise Exception(
                        "Connection failed with swift server: " +
                        str(err_msg))
                logging.error(
                    "Connection failed with swift server, retrying..")
                incr_sleep *= 2
                time.sleep(incr_sleep)
            finally:
                if acquired:
                    connection_lock.release()

    def updateAccount(self):
        """Publish the temp-URL key(s) on the Swift account so URLs signed
        against them are honoured."""
        headers = {'Temp-URL-Key': self.temp_url_key}
        if self.temp_url_key_2 is not None:
            headers['Temp-URL-Key-2'] = self.temp_url_key_2
        try:
            self.swift_conn.post_account(headers)
        except Exception as err:
            logging.error(str(err))
            raise Exception(
                "Update account failed with swift file server: " +
                str(err))

    def getobjectFileUri(self, filename):
        """Return the object path for *filename* in the configured container."""
        return self.getFileObjUri(self.container_name, filename)

    def getFileObjUri(self, container_name, fileobj_name):
        """Return the normalised '/container/object' path component."""
        return urlparse('/%s/%s' % (container_name, fileobj_name)).path

    def getObjUrl(self, filename):
        """Return the public (unauthenticated) download URL of *filename*."""
        image_path = self.getobjectFileUri(filename)
        try:
            image_url = self.getPublicDownloadUrl(image_path)
            return image_url
        except Exception as e:
            logging.error(str(e))
            raise Exception(
                "Get object url failed with swift file server: " + str(e))

    def getPublicDownloadUrl(self, image_path):
        """Join the storage URL and object path with exactly one '/'."""
        return '%s/%s' % (
            re.sub(r'([^/])/*$', r'\1', self.storageurl),
            re.sub(r'^/*([^/])', r'\1', image_path))

    def close(self):
        """Close the underlying Swift connection, if one was opened."""
        if self.swift_conn:
            self.swift_conn.close()
def main():
    """Ansible module entry point: read the Swift parameters, resolve the
    public download URL of the requested image file and exit with the URL
    (or the error message on failure)."""
    module = FabricAnsibleModule(
        argument_spec=dict(
            authtoken=dict(required=True),
            authurl=dict(required=True),
            user=dict(required=True),
            key=dict(required=True),
            tenant_name=dict(required=False, default="admin"),
            auth_version=dict(required=False, default='3.0'),
            temp_url_key=dict(required=True),
            temp_url_key_2=dict(required=True),
            chosen_temp_url_key=dict(required=False,
                                     default="temp_url_key"),
            container_name=dict(required=True),
            filename=dict(required=True),
            connection_retry_count=dict(required=False,
                                        default=5, type='int')),
        supports_check_mode=False)
    params = module.params
    url = None
    error_msg = ''
    try:
        fileutil = FileSvcUtil(
            params['authtoken'],
            params['authurl'],
            params['user'],
            params['key'],
            params['tenant_name'],
            params['auth_version'],
            params['container_name'],
            params['temp_url_key'],
            params['temp_url_key_2'],
            params['connection_retry_count'],
            params['chosen_temp_url_key'])
        url = fileutil.getObjUrl(params['filename'])
        fileutil.close()
    except Exception as e:
        error_msg = "Exception occurred in swift_fileutil: " + str(e)
    module.exit_json(url=url, error_msg=error_msg)
if __name__ == '__main__':
    # Ansible executes the module file directly.
    main()
Swift upload image file.
This file contains implementation of gett
swift download URL for the uploaded image file
!/usr/bin/python Copyright (c) 2018 Juniper Networks, Inc. All rights reserved. noqa pragma: no cover | 231 | en | 0.881459 |
import sys
import logging
logger = logging.getLogger(__name__)
import base64
import json
import datetime
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives.asymmetric import padding
from mqfactory.message.security import Signature
import socket
class Decoded(object):
  """Mapping view over a key dict that PEM-decodes key pairs on lookup.

  Entries whose "public" half is already an RSAPublicKey object are
  returned untouched; anything else is treated as PEM text and decoded
  on every access.
  """

  def __init__(self, keys):
    self.keys = keys

  def __getitem__(self, name):
    entry = self.keys[name]
    if isinstance(entry["public"], rsa.RSAPublicKey):
      return entry
    return {
      "private": decode(entry["private"]),
      "public":  decode(entry["public"])
    }
class RsaSignature(Signature):
  """Signs and validates messages with RSA-PSS + SHA-256.

  The signature covers the serialized tags (including the added
  "origin"/"ts" fields) and payload, but never the "hash" field itself.
  """
  def __init__(self, keys, me=socket.gethostname()):
    # NOTE(review): the default for `me` is evaluated once at import time.
    self.keys = Decoded(keys)
    self.me = me
    # Our own private key, used to sign outgoing messages.
    self.key = self.keys[self.me]["private"]
  def _sign(self, message, ts=None):
    """Attach a signature tag (origin, timestamp, hash) to *message*."""
    logger.debug("signing {0}".format(message.id))
    # origin/ts must be in place *before* serializing so they are covered
    # by the signature.
    message.tags["signature"] = {
      "origin" : self.me,
      "ts" : ts or str(datetime.datetime.utcnow())
    }
    payload = serialize(message)
    message.tags["signature"]["hash"] = base64.b64encode(sign(payload, self.key))
  def _validate(self, message):
    """Verify *message*'s signature; raises when it does not match.

    Pops "hash" before re-serializing (the hash is not self-covering) and
    removes the whole signature tag on success.
    """
    key = self.keys[message.tags["signature"]["origin"]]["public"]
    signature = base64.b64decode(message.tags["signature"].pop("hash"))
    payload = serialize(message)
    validate(payload, signature, key)
    message.tags.pop("signature")
# utility functions wrapping cryptography functions
def generate_key_pair():
  """Create a fresh 2048-bit RSA key pair; returns (private, public)."""
  private = rsa.generate_private_key(
    public_exponent=65537,
    key_size=2048,
    backend=default_backend()
  )
  public = private.public_key()
  return private, public
def encode(key):
  """Serialize a public or private RSA key to PEM bytes (unencrypted)."""
  if not isinstance(key, rsa.RSAPublicKey):
    return key.private_bytes(
      encoding=serialization.Encoding.PEM,
      format=serialization.PrivateFormat.TraditionalOpenSSL,
      encryption_algorithm=serialization.NoEncryption()
    )
  return key.public_bytes(
    encoding=serialization.Encoding.PEM,
    format=serialization.PublicFormat.SubjectPublicKeyInfo
  )
def decode(pem):
  """Load PEM text (str or bytes) into a public or private key object."""
  try:
    pem = pem.encode("ascii", "ignore")  # str -> bytes; bytes pass through
  except AttributeError:
    pass
  if b"PUBLIC KEY" not in pem:
    return serialization.load_pem_private_key(
      pem,
      password=None,
      backend=default_backend()
    )
  return serialization.load_pem_public_key(
    pem,
    backend=default_backend()
  )
def serialize(message):
  """Deterministically serialize a message's tags and payload.

  Keys are sorted so identical messages always produce identical bytes
  (required for signing/verification); the result is base64-encoded.
  """
  body = {
    "tags": message.tags,
    "payload": message.payload
  }
  raw = json.dumps(body, sort_keys=True).encode("utf-8")
  return base64.b64encode(raw)
def sign(payload, key):
  """Return an RSA-PSS/SHA-256 signature of *payload* made with *key*."""
  pss = padding.PSS(
    mgf=padding.MGF1(hashes.SHA256()),
    salt_length=padding.PSS.MAX_LENGTH
  )
  return key.sign(payload, pss, hashes.SHA256())
def validate(message, signature, key):
  """Verify an RSA-PSS/SHA-256 *signature* over *message*.

  Raises (InvalidSignature from the backend) when the check fails.
  """
  pss = padding.PSS(
    mgf=padding.MGF1(hashes.SHA256()),
    salt_length=padding.PSS.MAX_LENGTH
  )
  key.verify(signature, message, pss, hashes.SHA256())
| mqfactory/message/security/rsa.py | 3,135 | utility functions wrapping cryptography functions unicode -> str | 64 | en | 0.464802 |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.expm1_run import expm1_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
    """expm1 operator test: registers shape/dtype combinations and runs
    them through the common TestBase machinery."""
    def setup(self):
        """Initialise case metadata and the local / rpc-cloud test tables."""
        case_name = "test_expm1_001"
        case_path = os.getcwd()
        self.params_init(case_name, case_path)
        self.caseresult = True
        self._log.info("============= {0} Setup case============".format(self.casename))
        self.testarg = [
            # testflag,opfuncname,testRunArgs, setdimArgs
            ("expm1_001", expm1_run, ([4, 3], 'float16')),
            ("expm1_002", expm1_run, ([4, 16], 'float32')),
            ("expm1_003", expm1_run, ([4, ], 'float16')),
            ("expm1_004", expm1_run, ([4, 3, 16], 'float16')),
            ("expm1_005", expm1_run, ([32, 1024], 'float32')),
        ]
        self.testarg_rpc_cloud = [
            # testflag,opfuncname,testRunArgs, setdimArgs
            ("expm1_006", expm1_run, ([4, 3], 'float16')),
            ("expm1_007", expm1_run, ([4, 3], 'float32')),
            ("expm1_008", expm1_run, ([4, ], 'float16')),
            ("expm1_009", expm1_run, ([4, 3, 16], 'float16')),
            ("expm1_010", expm1_run, ([32, 1024], 'float32')),
        ]
        return
    @pytest.mark.level0
    @pytest.mark.platform_arm_ascend_training
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.env_onecard
    def test_run(self):
        """
        Run the local test table.
        :return:
        """
        self.common_run(self.testarg)
    def test_run_rpc_cloud(self):
        """
        Run the rpc-cloud test table.
        :return:
        """
        self.common_run(self.testarg_rpc_cloud)
    def teardown(self):
        """
        Log teardown; no resources to release.
        :return:
        """
        self._log.info("============= {0} Teardown============".format(self.casename))
        return
:return:
run case.#
:return:
run case.#
:return:
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
Copyright 2019 Huawei Technologies Co., Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. TestCase= class: put to tests/*/ testflag,opfuncname,testRunArgs, setdimArgs testflag,opfuncname,testRunArgs, setdimArgs | 875 | en | 0.63936 |
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from gfx.models import Material, Mesh, Shader, Model
import subprocess
import os
import base64
import platform
def get_mesh(request, mesh_id):
    """Export the requested Mesh via headless Blender and return the
    exported file's contents.

    TODO: serve the export from a cache instead of re-running Blender on
    every single request.
    """
    mesh = get_object_or_404(Mesh, pk=mesh_id)
    # Locate the Blender binary for the host platform; only macOS is
    # currently supported (the original local name shadowed `sys`).
    system_name = platform.system()
    if system_name != "Darwin":
        return HttpResponse("Unknown operating system `{}`".format(system_name))
    blender = '/Applications/blender.app/Contents/MacOS/blender'
    # Run the exporter headlessly; treat any non-zero exit as failure
    # (the original only checked for exit code 1).
    if subprocess.call([blender, "--background", mesh.mesh.name,
                        "--python", "./gfx/export.py"]) != 0:
        return HttpResponse("There was an error")
    # The exporter writes a .js file alongside the source mesh file.
    base_name, _ = os.path.splitext(mesh.mesh.name)
    with open("{0}.js".format(base_name)) as exported:
        return HttpResponse(exported.read())
def get_texture(request, texture_id):
    """Placeholder view: echo the requested texture id."""
    return HttpResponse("You're looking at texture %s." % texture_id)
def get_material(request, material_id):
    """Return a material's id plus base64-encoded vertex/fragment shader
    sources as a JSON document."""
    material = get_object_or_404(Material, pk=material_id)
    vert_source = "{}".format(material.getVertex())
    frag_source = "{}".format(material.getFragment())
    # Encode the shader text to UTF-8 before base64 and decode the result
    # back to str: on Python 3, b64encode() requires bytes and returns
    # bytes (which would otherwise render as "b'...'" inside the JSON).
    return HttpResponse(
        """{{
            "id":{0},
            "vertex":"{1}",
            "fragment":"{2}"
        }}
        """.format(
            material.id,
            base64.b64encode(vert_source.encode("utf-8")).decode("ascii"),
            base64.b64encode(frag_source.encode("utf-8")).decode("ascii"),
        )
    )
def get_shader(request, shader_id):
    """Return a shader's id, tag and raw content as a JSON document."""
    shader = get_object_or_404(Shader, pk=shader_id)
    body = """{{
            "id":{0},
            "tag":"{1}",
            "content":"{2}"
        }}""".format(shader.id, shader.tag, shader.content)
    return HttpResponse(body)
def search_models(request):
    """Look up a Model by the ``tag`` query parameter and return its ids
    (model, mesh, material) as a JSON document."""
    model = get_object_or_404(Model, name=request.GET.get('tag', None))
    body = """{{
            "id":{0},
            "tag":"{1}",
            "mesh_id":{2},
            "material_id":{3}
        }}""".format(model.id, model.name, model.mesh.id, model.material.id)
    return HttpResponse(body)
| gfx/views.py | 2,127 | exportedMesh.findwith( mesh_id )): Can't find it, then grab the mesh, and export it | 83 | en | 0.735304 |
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import argparse
def plot():
    """Plot weak scaling of MAESTROeX runs found in the current directory.

    Scans './' for output files whose names contain 'MAESTROeX', extracts
    the node count and the zones-advanced throughput from each, normalizes
    per-node throughput against the smallest run, and writes 'scaling.eps'
    and 'scaling.png'.
    """
    results_dir = './'
    results_files = [result for result in os.listdir(results_dir) if 'MAESTROeX' in result]
    # Assumes one MPI rank per GPU and 6 GPUs per node -- TODO confirm the
    # machine layout these runs came from.
    n_gpus_per_node = 6
    throughput_list = []
    nnodes_list = []
    for results_file in results_files:
        nsteps = 0
        nzones = 0
        time = 0.0
        # Use a context manager so each results file is closed promptly
        # (the original leaked the handle returned by open()).
        with open(results_dir + results_file) as results:
            for line in results:
                # Split once per line instead of once per comparison.
                words = line.split()
                if not words:
                    continue
                # Determine the number of MPI ranks and thus the number of nodes.
                if len(words) == 6 and words[0] == 'MPI' and words[1] == 'initialized':
                    n_ranks = int(words[3])
                    n_nodes = max(1, n_ranks / n_gpus_per_node)
                # For each step, add up the number of zones advanced and the
                # walltime for that step.
                if len(words) == 4 and words[0] == 'Level' and words[1] == '0,' and words[3] == 'cells':
                    nsteps += 1
                    nzones += int(words[2])
                if len(words) == 6 and words[0] == 'Time' and words[2] == 'advance':
                    time += float(words[5])
        # NOTE(review): if a file lacks the 'MPI initialized' line, n_nodes is
        # unbound (first file) or stale from the previous file; and time == 0.0
        # would divide by zero below. Confirm inputs are always well-formed
        # before hardening further.
        nnodes_list.append(n_nodes)
        throughput_list.append(nzones / time / 1.e6)
    # Now we have all the results: sort by node count and normalize the
    # per-node throughput to the smallest run.
    nnodes_arr = np.array(nnodes_list)
    throughput_arr = np.array(throughput_list)
    throughput_arr = np.array([x for _, x in sorted(zip(nnodes_arr, throughput_arr))])
    nnodes_arr = sorted(nnodes_arr)
    throughput_arr = throughput_arr / throughput_arr[0] / nnodes_arr
    plt.plot(nnodes_arr, throughput_arr, linestyle='-', lw=4, marker='o', markersize=14)
    plt.xlim([0.9 * min(nnodes_arr), 1.1 * max(nnodes_arr)])
    plt.ylim([0, 1.25 * max(throughput_arr)])
    plt.ylabel('Throughput (normalized)', fontsize=20)
    plt.xlabel('Number of nodes', fontsize=20)
    plt.title('Weak scaling of MAESTROeX reacting bubble', fontsize=20)
    # NOTE(review): 'basex' was renamed 'base' in matplotlib >= 3.3; keep in
    # sync with the pinned matplotlib version.
    plt.xscale('log', basex=2)
    ax = plt.gca()
    ax.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter())
    plt.xticks([1, 2, 4, 8, 16, 32, 64, 128])
    plt.tick_params(labelsize=14)
    plt.tight_layout()
    plt.savefig('scaling.eps')
    plt.savefig('scaling.png')
def main():
    """Entry point: generate and save the scaling plots."""
    plot()
if __name__ == "__main__":
    main()
| Exec/test_problems/reacting_bubble/scaling/sc20/plot.py | 2,448 | Determine the number of MPI ranks and thus the number of nodes. For each step, add up the number of zones advanced and the walltime for that step. Now we have all the results, so plot them. | 189 | en | 0.876496 |
import math
# Workshop exercises; numbered comments match the task list.
# 1: area of a circle of radius r (pi * r^2).
area_of_circle = lambda r: math.pi * r ** 2
print(area_of_circle(10))
# 2: sum and difference of two numbers, returned as a tuple.
calculation = lambda x, y: ((x + y), (x - y))
print(calculation(4, 2))
# 3: factorial, defined recursively below.
def product(n):
    """Return n! (the factorial of n) for non-negative integers.

    Generalized base case: any n <= 1 returns 1, so product(0) == 1 (the
    empty product). The original base case was ``n == 1``, which recursed
    forever for n == 0.
    """
    if n <= 1:
        return 1
    return n * product(n - 1)
print(product(5))
# 4: convert milliseconds to (days, hours, minutes, seconds). Each value is
# an independently rounded total, not a remainder-style breakdown.
time = lambda milli: (
    round(milli / (1000 * 60 * 60 * 24)), round(milli / (1000 * 60 * 60)), round(milli / (1000 * 60)),
    round(milli / 1000))
print(time(10000000))
# 5: pair a name with its salary; salary defaults to 5000.
showSalary = lambda name, salary=5000: (name, salary)
print(showSalary("A", 1000))
print(showSalary("B", 2000))
print(showSalary("C"))
# 6: difference of two numbers.
diff = lambda x, y: x - y
print(diff(10, 12))
# 7: echo three values of mixed types together with their str() forms.
printer = lambda x, y, z: (x, str(x), y, str(y), z, str(z))
print(printer(True, 22.25, 'yes'))
# 8: star data tuples, printed as-is and then sorted (tuples compare
# lexicographically, so this orders by the first element, the name).
data = [
    ('Alpha Centauri A', 4.3, 0.26, 1.56),
    ('Alpha Centauri B', 4.3, 0.077, 0.45),
    ('Alpha Centauri C', 4.2, 0.00001, 0.00006),
    ("Barnard's Star", 6.0, 0.00004, 0.0005),
    ('Wolf 359', 7.7, 0.000001, 0.00002),
    ('BD +36 degrees 2147', 8.2, 0.0003, 0.006),
    ('Luyten 726-8 A', 8.4, 0.000003, 0.00006),
    ('Luyten 726-8 B', 8.4, 0.000002, 0.00004),
    ('Sirius A', 8.6, 1.00, 23.6),
    ('Sirius B', 8.6, 0.001, 0.003),
    ('Ross 154', 9.4, 0.00002, 0.0005),
]
# data.sort()
print(data)
print(sorted(data)) | nus-bead/examples/Workshop01.py | 1,292 | 1 2 3 4 5 6 7 8 data.sort() | 27 | fr | 0.096059 |
import threading
import time
class PyMbsThread(threading.Thread):
    """Thread that repeatedly calls a step function, optionally in real time.

    ``function`` must advance the simulation by one step and return the new
    model time. When ``realTime`` is True the loop sleeps as needed so model
    time does not run ahead of (scaled) wall-clock time.
    """
    def __init__(self, function, realTime=False):
        threading.Thread.__init__(self)
        # Loop flag: run() spins while this is True; stop() clears it.
        self.execute = False
        # Step callback; must return the current model time.
        self.function = function
        self.realTime = realTime
        # Scaling factor applied to elapsed wall-clock time in run().
        self.scaling = 1
        # Start-Time (Model)
        self.model_offset = 0.0
        # Start-Time (Real)
        self.real_offset = 0.0
    def reinit(self):
        """Stop the loop (if running) and re-initialise this object.

        NOTE(review): threading.Thread objects normally cannot be restarted
        after run() finishes; calling __init__ again relies on Thread
        internals -- confirm this works on the targeted Python version.
        """
        if (self.execute):
            self.stop()
        self.__init__(self.function, self.realTime)
    def run(self):
        """Main loop: call function() each iteration until stop() is called."""
        # Initialise Offsets
        self.real_offset = time.time()
        self.model_offset = self.function()
        t = self.model_offset
        # Debug
        # print "Starting Thread " + str(id(self))
        # Endless Loop
        self.execute = True
        while self.execute:
            # synchronise with real time
            if (self.realTime):
                # Real Elapsed Time
                real = self.scaling*(time.time() - self.real_offset)
                # Model Elapsed Time
                model = t - self.model_offset
                # Difference
                deltaT = model-real
                if (deltaT > 0):
                    # Model time is ahead of (scaled) wall clock: wait.
                    time.sleep(deltaT)
            # Execute next step
            t = self.function()
        # Debug
        # print "Finished Thread " + str(id(self))
    def stop(self):
        """Ask the loop in run() to terminate after the current step."""
        self.execute = False
        # Debug
        # print "Stopped Thread " + str(id(self))
'''
Usage:
======
def myFunc():
    print('doing something')
    time.sleep(1)
t = PyMbsThread(myFunc)
t.start()  # starts the thread
t.stop()   # stops the thread
t.reinit() # "resets" the thread
t.start()  # starts the thread again
t.stop()
''' | pymbs/ui/thread.py | 1,721 | Start-Time (Model) Start-Time (Real) Initialise Offsets Debug print "Starting Thread " + str(id(self)) Endless Loop synchronise with real time Real Elapsed Time Model Elapsed Time Difference Execute next step Debug print "Finished Thread " + str(id(self)) Debug print "Stopped Thread " + str(id(self)) | 301 | en | 0.714176 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's command-line interface for this project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cloudblue.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from import_error
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| manage.py | 629 | Django's command-line utility for administrative tasks.
!/usr/bin/env python | 77 | en | 0.656913 |
"""Config flow for Coronavirus integration."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.data_entry_flow import FlowResult
from . import get_coordinator
from .const import DOMAIN, OPTION_WORLDWIDE
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Coronavirus."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL

    # Country options for the selection form ({value: label}), built lazily
    # on the first pass through async_step_user.
    _options = None

    async def async_step_user(
        self, user_input: dict[str, Any] | None = None
    ) -> FlowResult:
        """Handle the initial step."""
        errors: dict[str, str] = {}

        if self._options is None:
            # First visit: fetch case data and build the country choices.
            coordinator = await get_coordinator(self.hass)
            if not coordinator.last_update_success or coordinator.data is None:
                return self.async_abort(reason="cannot_connect")
            self._options = {OPTION_WORLDWIDE: "Worldwide"}
            for case in sorted(
                coordinator.data.values(), key=lambda case: case.country
            ):
                self._options[case.country] = case.country

        if user_input is not None:
            # One config entry per country: abort if it is already set up.
            await self.async_set_unique_id(user_input["country"])
            self._abort_if_unique_id_configured()
            return self.async_create_entry(
                title=self._options[user_input["country"]], data=user_input
            )

        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema({vol.Required("country"): vol.In(self._options)}),
            errors=errors,
        )
| homeassistant/components/coronavirus/config_flow.py | 1,647 | Handle a config flow for Coronavirus.
Config flow for Coronavirus integration. | 78 | en | 0.583121 |
from django_cradmin import crinstance, crapp
from django_cradmin.crinstance import reverse_cradmin_url
from devilry.apps.core.models import Period
from devilry.devilry_account.models import PeriodPermissionGroup
from devilry.devilry_admin.cradminextensions import devilry_crmenu_admin
from devilry.devilry_cradmin import devilry_crmenu
from devilry.devilry_cradmin import devilry_crinstance
from devilry.devilry_admin.views.period import admins
from devilry.devilry_admin.views.period import createassignment
from devilry.devilry_admin.views.period import examiners
from devilry.devilry_admin.views.period import overview
from devilry.devilry_admin.views.period import students
from devilry.devilry_admin.views.period import edit
from devilry.devilry_admin.views.period import overview_all_results
from devilry.devilry_qualifiesforexam import cradmin_app as qualifiesforexam
from devilry.devilry_admin.views.period.manage_tags import manage_tags
class Menu(devilry_crmenu_admin.Menu):
    """Breadcrumb menu for the period-admin UI: role / subject / period."""

    def build_menu(self):
        """Build the header breadcrumb: role item, subject, active period."""
        super(Menu, self).build_menu()
        period = self.request.cradmin_role
        self.add_role_menuitem_object()
        self.add_subject_breadcrumb_item(subject=period.subject)
        self.add_period_breadcrumb_item(period=period, active=True)

    def add_subject_breadcrumb_item(self, subject, active=False):
        """Add the subject breadcrumb, linking into the cradmin instance that
        matches the requesting user's devilryrole.

        The original duplicated the whole BreadcrumbMenuItem construction in
        both branches; only ``instanceid`` differed, so the branches are
        collapsed into a single call here.
        """
        if self.cradmin_instance.get_devilryrole_for_requestuser() == 'periodadmin':
            instanceid = 'devilry_admin_subject_for_periodadmin'
        else:
            instanceid = 'devilry_admin_subjectadmin'
        return self.add_headeritem_object(devilry_crmenu.BreadcrumbMenuItem(
            label=subject.short_name,
            url=reverse_cradmin_url(
                instanceid=instanceid,
                appname='overview',
                roleid=subject.id,
                viewname=crapp.INDEXVIEW_NAME
            ),
            active=active
        ))
class CrAdminInstance(devilry_crinstance.BaseCrInstanceAdmin):
    """cradmin instance rooted at a Period for the period-admin UI."""
    menuclass = Menu
    roleclass = Period
    # Sub-apps mounted on this instance; 'overview' is the front page.
    apps = [
        ('overview', overview.App),
        ('students', students.App),
        ('examiners', examiners.App),
        ('admins', admins.App),
        ('createassignment', createassignment.App),
        ('edit', edit.App),
        ('overview_all_results', overview_all_results.App),
        ('qualifiesforexam', qualifiesforexam.App),
        ('manage_tags', manage_tags.App),
    ]
    id = 'devilry_admin_periodadmin'
    rolefrontpage_appname = 'overview'

    def get_rolequeryset(self):
        # Periods the requesting user administers, newest first.
        return Period.objects.filter_user_is_admin(user=self.request.user)\
            .order_by('-start_time')

    def get_titletext_for_role(self, role):
        """
        Get a short title briefly describing the given ``role``.
        Remember that the role is a Period.
        """
        period = role
        # NOTE(review): returns the Period object, not a string -- presumably
        # rendered via its __str__; confirm before changing.
        return period

    @classmethod
    def matches_urlpath(cls, urlpath):
        # Route any /devilry_admin/period... URL to this instance.
        return urlpath.startswith('/devilry_admin/period')

    def __get_devilryrole_for_requestuser(self):
        # Uncached lookup backing get_devilryrole_for_requestuser().
        period = self.request.cradmin_role
        devilryrole = PeriodPermissionGroup.objects.get_devilryrole_for_user_on_period(
            user=self.request.user,
            period=period
        )
        if devilryrole is None:
            raise ValueError('Could not find a devilryrole for request.user. This must be a bug in '
                             'get_rolequeryset().')
        return devilryrole

    def get_devilryrole_for_requestuser(self):
        """
        Get the devilryrole for the requesting user on the current
        period (request.cradmin_instance).
        The return value is the same as for
        :meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,
        except that this method raises ValueError if it does not find a role.
        """
        # Cached on the instance: the permission lookup hits the database.
        if not hasattr(self, '_devilryrole_for_requestuser'):
            self._devilryrole_for_requestuser = self.__get_devilryrole_for_requestuser()
        return self._devilryrole_for_requestuser
| devilry/devilry_admin/views/period/crinstance_period.py | 4,430 | Get the devilryrole for the requesting user on the current
period (request.cradmin_instance).
The return values is the same as for
:meth:`devilry.devilry_account.models.PeriodPermissionGroupQuerySet.get_devilryrole_for_user_on_period`,
exept that this method raises ValueError if it does not find a role.
Get a short title briefly describing the given ``role``.
Remember that the role is n Period. | 398 | en | 0.690114 |
# qubit number=4
# total number=42
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings position by position.

    Note: the result is returned reversed relative to input order, matching
    the original implementation.
    """
    combined = [str(int(s[i]) ^ int(t[i])) for i in range(len(s))]
    return ''.join(combined)[::-1]
def bitwise_dot(s: str, t: str) -> str:
    """Dot product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the oracle circuit O_f over n control qubits plus one target.

    For each n-bit string where f returns "1", a multi-controlled Toffoli
    flips the target qubit; X gates before/after the Toffoli select the
    matching bit pattern (controls are active-high, so '0' bits are
    temporarily inverted).
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Assemble the full n-qubit circuit: a machine-generated gate prelude,
    the oracle for f over the first n-1 qubits, more generated gates, and a
    final measurement of every qubit.

    NOTE(review): the gate sequence (with its '# number=' markers) is
    auto-generated; do not reorder -- gate order defines the circuit.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.cx(input_qubit[0],input_qubit[3]) # number=10
    prog.h(input_qubit[3]) # number=39
    prog.cz(input_qubit[0],input_qubit[3]) # number=40
    prog.h(input_qubit[3]) # number=41
    prog.cx(input_qubit[0],input_qubit[3]) # number=33
    prog.x(input_qubit[3]) # number=34
    prog.cx(input_qubit[0],input_qubit[3]) # number=35
    prog.cx(input_qubit[0],input_qubit[3]) # number=25
    prog.cx(input_qubit[0],input_qubit[3]) # number=12
    prog.h(input_qubit[2]) # number=30
    prog.cz(input_qubit[0],input_qubit[2]) # number=31
    prog.h(input_qubit[2]) # number=32
    prog.x(input_qubit[2]) # number=21
    prog.h(input_qubit[2]) # number=36
    prog.cz(input_qubit[0],input_qubit[2]) # number=37
    prog.h(input_qubit[2]) # number=38
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    prog.h(input_qubit[0]) # number=5
    prog.h(input_qubit[3]) # number=16
    prog.cz(input_qubit[1],input_qubit[3]) # number=17
    prog.h(input_qubit[3]) # number=18
    # Apply the oracle across all n qubits (n-1 controls + 1 target).
    oracle = build_oracle(n-1, f)
    prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
    prog.h(input_qubit[1]) # number=6
    prog.h(input_qubit[2]) # number=7
    prog.h(input_qubit[3]) # number=8
    prog.h(input_qubit[0]) # number=9
    prog.h(input_qubit[0]) # number=26
    prog.cz(input_qubit[3],input_qubit[0]) # number=27
    prog.h(input_qubit[0]) # number=28
    prog.cx(input_qubit[3],input_qubit[0]) # number=14
    prog.y(input_qubit[2]) # number=29
    # circuit end
    # Measure every qubit into its classical bit.
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Hidden bit strings defining f(rep) = (a . rep) XOR b.
    a = "111"
    b = "0"
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4,f)
    # Run on the least-busy operational real IBM Q device (needs stored
    # IBMQ credentials; performs network I/O).
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
    sample_shot =8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    # Transpile against a fake device model just to report circuit stats.
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Persist the measurement counts and the transpiled circuit.
    writefile = open("../data/startQiskit_QC2449.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.__len__(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
| benchmark/startQiskit_QC2449.py | 4,456 | qubit number=4 total number=42 implement the oracle O_f NOTE: use multi_control_toffoli_gate ('noancilla' mode) https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate oracle.barrier() circuit begin number=10 number=39 number=40 number=41 number=33 number=34 number=35 number=25 number=12 number=30 number=31 number=32 number=21 number=36 number=37 number=38 number=2 number=3 number=4 number=5 number=16 number=17 number=18 number=6 number=7 number=8 number=9 number=26 number=27 number=28 number=14 number=29 circuit end | 802 | en | 0.395193 |
# from nonbonded.cli.project.project import project
#
# __all__ = [project]
| nonbonded/cli/projects/__init__.py | 76 | from nonbonded.cli.project.project import project __all__ = [project] | 69 | en | 0.536717 |
# Copyright 2022 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pkg.apis.manager.v1beta1.python import api_pb2 as api
# NOTE(review): configures the root logger at import time (DEBUG level),
# which affects every consumer of this module -- confirm it is intentional.
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class Trial(object):
    """In-memory view of a Katib trial: its parameter assignments plus the
    observed objective metric and any additional metrics."""

    def __init__(self, name, assignments, target_metric, metric_name, additional_metrics):
        self.name = name
        self.assignments = assignments
        self.target_metric = target_metric
        self.metric_name = metric_name
        self.additional_metrics = additional_metrics

    @staticmethod
    def convert(trials):
        """Convert succeeded gRPC trials to Trial objects.

        Trials that did not succeed, or whose observation lacks the
        objective metric, are skipped.
        """
        res = []
        for trial in trials:
            if trial.status.condition == api.TrialStatus.TrialConditionType.SUCCEEDED:
                new_trial = Trial.convertTrial(trial)
                if new_trial is not None:
                    # Append the trial we already converted. The original
                    # called convertTrial() a second time here, redoing the
                    # whole conversion for every appended trial.
                    res.append(new_trial)
        return res

    @staticmethod
    def convertTrial(trial):
        """Convert one gRPC trial; return None when the objective metric is
        missing from its observation."""
        assignments = []
        for assignment in trial.spec.parameter_assignments.assignments:
            assignments.append(Assignment.convert(assignment))
        metric_name = trial.spec.objective.objective_metric_name
        target_metric, additional_metrics = Metric.convert(
            trial.status.observation, metric_name)
        # If the target_metric is none, ignore the trial.
        if target_metric is not None:
            trial = Trial(trial.name, assignments, target_metric,
                          metric_name, additional_metrics)
            return trial
        return None

    def __str__(self):
        if self.name is None:
            return "Trial(assignment: {})".format(", ".join([str(e) for e in self.assignments]))
        else:
            return "Trial(assignment: {}, metric_name: {}, metric: {}, additional_metrics: {})".format(
                ", ".join([str(e) for e in self.assignments]),
                self.metric_name, self.target_metric,
                ", ".join(str(e) for e in self.additional_metrics))
class Assignment(object):
    """A single (name, value) hyperparameter assignment."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    @staticmethod
    def convert(assignment):
        """Build an Assignment from a gRPC parameter-assignment message."""
        return Assignment(assignment.name, assignment.value)

    @staticmethod
    def generate(list_of_assignments):
        """Translate lists of Assignments into gRPC reply messages."""
        replies = []
        for assignments in list_of_assignments:
            converted = [
                api.ParameterAssignment(name=a.name, value=str(a.value))
                for a in assignments
            ]
            replies.append(
                api.GetSuggestionsReply.ParameterAssignments(assignments=converted)
            )
        return replies

    def __str__(self):
        return "Assignment(name={}, value={})".format(self.name, self.value)
class Metric(object):
    """A named metric value observed for a trial."""

    def __init__(self, name, value):
        self.name = name
        self.value = value

    @staticmethod
    def convert(observation, target):
        """Split an observation into (target metric, other metrics).

        Returns (None, others) when no metric named ``target`` is present;
        if several match, the last one wins (as in the original).
        """
        target_metric = None
        others = []
        for m in observation.metrics:
            converted = Metric(m.name, m.value)
            if m.name == target:
                target_metric = converted
            else:
                others.append(converted)
        return target_metric, others

    def __str__(self):
        return "Metric(name={}, value={})".format(self.name, self.value)
| pkg/suggestion/v1beta1/internal/trial.py | 3,854 | Copyright 2022 The Kubeflow Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. If the target_metric is none, ignore the trial. | 605 | en | 0.852357 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# capmetrics-etl documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 11 00:08:57 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'capmetrics-etl'
copyright = '2016, Julio Gonzalez Altamirano'
author = 'Julio Gonzalez Altamirano'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
version = '0.1.0'
# Keep the full release string in sync with the short version.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'capmetrics-etldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
# LaTeX output is built with the 'manual' document class.
latex_documents = [
    (master_doc, 'capmetrics-etl.tex', 'capmetrics-etl Documentation',
     'Julio Gonzalez Altamirano', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'capmetrics-etl', 'capmetrics-etl Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): 'One line description of project.' is the sphinx-quickstart
# placeholder -- replace it with a real project description.
texinfo_documents = [
    (master_doc, 'capmetrics-etl', 'capmetrics-etl Documentation',
     author, 'capmetrics-etl', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| docs/conf.py | 9,314 | !/usr/bin/env python3 -*- coding: utf-8 -*- capmetrics-etl documentation build configuration file, created by sphinx-quickstart on Mon Jan 11 00:08:57 2016. This file is execfile()d with the current directory set to its containing dir. Note that not all possible configuration values are present in this autogenerated file. All configuration values have a default; values that are commented out serve to show the default. If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here.sys.path.insert(0, os.path.abspath('.')) -- General configuration ------------------------------------------------ If your documentation needs a minimal Sphinx version, state it here.needs_sphinx = '1.0' Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. The suffix(es) of source filenames. You can specify multiple suffix as a list of string: source_suffix = ['.rst', '.md'] The encoding of source files.source_encoding = 'utf-8-sig' The master toctree document. General information about the project. The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in various other places throughout the built documents. The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages. This is also used if you do content translation via gettext catalogs. Usually you set "language" from the command line for these cases. 
There are two options for replacing |today|: either, you set today to some non-false value, then it is used:today = '' Else, today_fmt is used as the format for a strftime call.today_fmt = '%B %d, %Y' List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. The reST default role (used for this markup: `text`) to use for all documents.default_role = None If true, '()' will be appended to :func: etc. cross-reference text.add_function_parentheses = True If true, the current module name will be prepended to all description unit titles (such as .. function::).add_module_names = True If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default.show_authors = False The name of the Pygments (syntax highlighting) style to use. A list of ignored prefixes for module index sorting.modindex_common_prefix = [] If true, keep warnings as "system message" paragraphs in the built documents.keep_warnings = False If true, `todo` and `todoList` produce output, else they produce nothing. -- Options for HTML output ---------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available for each theme, see the documentation.html_theme_options = {} Add any paths that contain custom themes here, relative to this directory.html_theme_path = [] The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation".html_title = None A shorter title for the navigation bar. Default is the same as html_title.html_short_title = None The name of an image file (relative to this directory) to place at the top of the sidebar.html_logo = None The name of an image file (within the static path) to use as favicon of the docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 pixels large.html_favicon = None Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". Add any extra paths that contain custom files (such as robots.txt or .htaccess) here, relative to this directory. These files are copied directly to the root of the documentation.html_extra_path = [] If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format.html_last_updated_fmt = '%b %d, %Y' If true, SmartyPants will be used to convert quotes and dashes to typographically correct entities.html_use_smartypants = True Custom sidebar templates, maps document names to template names.html_sidebars = {} Additional templates that should be rendered to pages, maps page names to template names.html_additional_pages = {} If false, no module index is generated.html_domain_indices = True If false, no index is generated.html_use_index = True If true, the index is split into individual pages for each letter.html_split_index = False If true, links to the reST sources are added to the pages.html_show_sourcelink = True If true, "Created using Sphinx" is shown in the HTML footer. Default is True.html_show_sphinx = True If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.html_show_copyright = True If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The value of this option must be the base URL from which the finished HTML is served.html_use_opensearch = '' This is the file name suffix for HTML files (e.g. ".xhtml").html_file_suffix = None Language to be used for generating the HTML full-text search index. 
Sphinx supports the following languages: 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'html_search_language = 'en' A dictionary with options for the search language support, empty by default. Now only 'ja' uses this config valuehtml_search_options = {'type': 'default'} The name of a javascript file (relative to the configuration directory) that implements a search results scorer. If empty, the default will be used.html_search_scorer = 'scorer.js' Output file base name for HTML help builder. -- Options for LaTeX output --------------------------------------------- The paper size ('letterpaper' or 'a4paper').'papersize': 'letterpaper', The font size ('10pt', '11pt' or '12pt').'pointsize': '10pt', Additional stuff for the LaTeX preamble.'preamble': '', Latex figure (float) alignment'figure_align': 'htbp', Grouping the document tree into LaTeX files. List of tuples (source start file, target name, title, author, documentclass [howto, manual, or own class]). The name of an image file (relative to this directory) to place at the top of the title page.latex_logo = None For "manual" documents, if this is true, then toplevel headings are parts, not chapters.latex_use_parts = False If true, show page references after internal links.latex_show_pagerefs = False If true, show URL addresses after external links.latex_show_urls = False Documents to append as an appendix to all manuals.latex_appendices = [] If false, no module index is generated.latex_domain_indices = True -- Options for manual page output --------------------------------------- One entry per manual page. List of tuples (source start file, name, description, authors, manual section). If true, show URL addresses after external links.man_show_urls = False -- Options for Texinfo output ------------------------------------------- Grouping the document tree into Texinfo files. 
List of tuples (source start file, target name, title, author, dir menu entry, description, category) Documents to append as an appendix to all manuals.texinfo_appendices = [] If false, no module index is generated.texinfo_domain_indices = True How to display URL addresses: 'footnote', 'no', or 'inline'.texinfo_show_urls = 'footnote' If true, do not generate a @detailmenu in the "Top" node's menu.texinfo_no_detailmenu = False | 7,889 | en | 0.637284 |
import string
from app.chatterbot import languages
from spacy.lang.zh import Chinese
class LowercaseTagger(object):
    """Trivial tagger that indexes text simply by its lowercase form."""

    def __init__(self, language=None):
        # Default to English when no language object is supplied.
        self.language = language or languages.ENG

    def get_text_index_string(self, text):
        """Return *text* folded to lowercase."""
        return text.lower()
class PosLemmaTagger(object):
    """Index text as a space-joined string of lowercase lemmas via spaCy.

    Longer inputs are filtered to alphabetic, non-stop-word tokens; very
    short inputs (<= 2 characters) are lemmatized wholesale so they still
    yield an index string.
    """

    def __init__(self, language=None):
        # spaCy is imported lazily so merely importing this module does not
        # require spaCy to be installed.
        import spacy
        self.language = language or languages.ENG
        # Extra CJK/fullwidth punctuation stripped on top of ASCII
        # string.punctuation (note: contains some duplicate characters,
        # which is harmless for a translation table).
        punc = "!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏."
        # punc = punc.decode("utf-8")
        # Translation table mapping every punctuation character to None
        # (i.e. deletion).
        self.punctuation_table = str.maketrans(dict.fromkeys(string.punctuation + punc))
        language = self.language.ISO_639_1.lower()
        if language == 'zh':
            # Chinese uses spaCy's bare language class rather than a loaded
            # pipeline — presumably because no 'zh' model is installed here;
            # TODO(review): confirm.
            self.nlp = Chinese()
        else:
            self.nlp = spacy.load(language)

    def get_text_index_string(self, text):
        """
        Return a string of text containing part-of-speech, lemma pairs.
        """
        # For very short inputs, strip punctuation first — unless doing so
        # would leave an empty string.
        if len(text) <= 2:
            text_without_punctuation = text.translate(self.punctuation_table)
            if len(text_without_punctuation) >= 1:
                text = text_without_punctuation
        document = self.nlp(text)
        if len(text) <= 2:
            # Too short for stop-word filtering: lemmatize every token.
            bigram_pairs = [
                token.lemma_.lower() for token in document
            ]
            tokens = [ele for ele in bigram_pairs]
        else:
            # Keep alphabetic, non-stop-word tokens...
            tokens = [
                token for token in document if token.is_alpha and not token.is_stop
            ]
            # ...but fall back to all alphabetic tokens when filtering
            # leaves fewer than two.
            if len(tokens) < 2:
                tokens = [
                    token for token in document if token.is_alpha
                ]
            tokens = [token.lemma_.lower() for token in tokens]
        # if not bigram_pairs:
        #     bigram_pairs = [
        #         token.lemma_.lower() for token in document
        #     ]
        #
        # return ' '.join(bigram_pairs)
        return ' '.join(tokens)
| app/chatterbot/tagging.py | 2,159 | Returns the text in lowercase.
Return a string of text containing part-of-speech, lemma pairs.
punc = punc.decode("utf-8") if not bigram_pairs: bigram_pairs = [ token.lemma_.lower() for token in document ] return ' '.join(bigram_pairs) | 253 | en | 0.315019 |
import torch_xla.test.test_utils as test_utils
import torch_xla.distributed.xla_multiprocessing as xmp
import torch_xla.core.xla_model as xm
import torch_xla.utils.utils as xu
import torch_xla.distributed.parallel_loader as pl
import torch_xla.debug.metrics as met
import torch_xla
import torchvision.transforms as transforms
import torchvision
import torch.optim as optim
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy as np
import sys
import os
import webdataset as wds
import datetime
import time
# import warnings
# warnings.filterwarnings("ignore")
from itertools import islice
import torch_xla.debug.profiler as xp
# profiler_port=9012
# Make the torch_xla test helpers (schedulers, args_parse) importable from
# whichever of the well-known install locations exists on this machine.
for extra in ('/usr/share/torch-xla-1.7/pytorch/xla/test', '/pytorch/xla/test', '/usr/share/pytorch/xla/test'):
    if os.path.exists(extra):
        sys.path.insert(0, extra)
import schedulers
# import gcsdataset
import args_parse # XLA arg parser
# import argparse # py arg parser
# parser = argparse.ArgumentParser(description='WebDataset args for modified XLA model')
# parser.add_argument('--wds_traindir', type=str, default='/tmp/imagenet')
# parser.add_argument('--wds_testdir', type=str, default='/tmp/imagenet')
# parser.add_argument('--trainsize', type=int, default=1280000)
# parser.add_argument('--testsize', type=int, default=50000)
# wds_args, others = parser.parse_known_args()
# Torchvision architectures this script knows how to build.
SUPPORTED_MODELS = [
    'alexnet',
    'densenet121', 'densenet161', 'densenet169', 'densenet201',
    'inception_v3',
    'resnet101', 'resnet152', 'resnet18', 'resnet34', 'resnet50',
    'squeezenet1_0', 'squeezenet1_1',
    'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn',
    'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn',
]

# Extra command-line options layered on top of the common XLA arg parser.
MODEL_OPTS = {
    '--model': {'choices': SUPPORTED_MODELS, 'default': 'resnet50'},
    '--test_set_batch_size': {'type': int},
    '--lr_scheduler_type': {'type': str},
    '--lr_scheduler_divide_every_n_epochs': {'type': int},
    '--lr_scheduler_divisor': {'type': int},
    '--dataset': {
        'choices': ['gcsdataset', 'torchdataset'],
        'default': 'gcsdataset',
        'type': str,
    },
}
# '--wds_traindir': {
# 'type': str,
# 'default':'/tmp/imagenet'
# },
# '--wds_testdir': {
# 'type': str,
# 'default': '/tmp/imagenet'
# },
# '--trainsize': {
# 'type': int,
# 'default': 1280000
# },
# '--testsize': {
# 'type': int,
# 'default': 50000
# },
# Parse the shared XLA command-line flags, extended with MODEL_OPTS above.
# Hyper-parameters passed as None here are backfilled below from the
# per-model defaults.
FLAGS = args_parse.parse_common_options(
    datadir='/tmp/imagenet',
    batch_size=None,
    num_epochs=None,
    momentum=None,
    lr=None,
    target_accuracy=None,
    opts=MODEL_OPTS.items(),
    profiler_port=9012,
)
# Baseline hyper-parameters used when the chosen model has no overrides.
DEFAULT_KWARGS = dict(
    batch_size=128,
    test_set_batch_size=64,
    num_epochs=18,
    momentum=0.9,
    lr=0.1,
    target_accuracy=0.0,
)

# Per-model overrides; keys from DEFAULT_KWARGS that are not overridden
# here are inherited unchanged, and new keys may be added.
MODEL_SPECIFIC_DEFAULTS = {
    'resnet50': dict(
        DEFAULT_KWARGS,
        **{
            'lr': 0.5,
            'lr_scheduler_divide_every_n_epochs': 20,
            'lr_scheduler_divisor': 5,
            'lr_scheduler_type': 'WarmupAndExponentialDecayScheduler',
        },
    ),
}
# Backfill any flags the user did not set explicitly with the defaults for
# the selected model (unknown models fall back to DEFAULT_KWARGS).
for _arg_name, _arg_default in MODEL_SPECIFIC_DEFAULTS.get(
        FLAGS.model, DEFAULT_KWARGS).items():
    if getattr(FLAGS, _arg_name) is None:
        setattr(FLAGS, _arg_name, _arg_default)
def get_model_property(key):
    """Return a per-model property for FLAGS.model.

    :param key: 'img_dim' (input crop size) or 'model_fn' (constructor)
    :return: the property value; models without a specific entry get a
        224px input and the torchvision constructor matching their name.
    """
    overrides = {
        'inception_v3': {
            'img_dim': 299,
            'model_fn': lambda: torchvision.models.inception_v3(aux_logits=False)
        },
    }
    fallback = {
        'img_dim': 224,
        'model_fn': getattr(torchvision.models, FLAGS.model)
    }
    return overrides.get(FLAGS.model, fallback)[key]
def _train_update(device, step, loss, tracker, epoch, writer):
    """Step closure: print one training progress line for this replica."""
    test_utils.print_training_update(
        device,
        step,
        loss.item(),
        tracker.rate(),          # instantaneous replica rate
        tracker.global_rate(),   # rate averaged over the whole run
        epoch,
        summary_writer=writer)
##### WDS ########
# Sample counts matching the sharded WebDataset tars (not the full
# 1,281,167-image ImageNet train split).
# trainsize = 1281167 # all shards
trainsize = 1280000 #FLAGS.trainsize # 1280 shards {000...079}
testsize = 50000 # FLAGS.testsize
# train_dir = FLAGS.wds_traindir
# test_dir = FLAGS.wds_testdir
def identity(x):
    """Return *x* unchanged (no-op label transform for the WDS pipeline)."""
    return x
def my_worker_splitter(urls):
    """Select the shard URLs belonging to this DataLoader worker.

    Used as the WebDataset shard-selection function (replaces
    ``wds.split_by_worker``): worker *i* of *N* takes every N-th URL
    starting at *i*, so workers stream disjoint shard subsets. Outside a
    worker process (no worker info available) all URLs are returned.

    :param urls: iterable of shard URLs
    :return: list containing this worker's slice of the URLs
    """
    # Materialize once; a plain list() replaces the needless copy
    # comprehension and accepts any iterable.
    urls = list(urls)
    worker_info = torch.utils.data.get_worker_info()
    if worker_info is None:
        # Not inside a DataLoader worker process: no splitting to do.
        return urls
    return urls[worker_info.id::worker_info.num_workers]
def my_node_splitter(urls):
    """Select the shard URLs belonging to this accelerator node.

    Node with ordinal *r* out of *W* replicas takes every W-th URL
    starting at *r*, so nodes stream disjoint shard subsets.

    :param urls: list of shard URLs
    :return: this node's slice of the URLs
    """
    rank = xm.get_ordinal()
    world_size = xm.xrt_world_size()
    return urls[rank::world_size]
# Channel-wise normalization constants (the mean/std values commonly used
# for ImageNet-trained torchvision models).
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def make_train_loader(img_dim, shuffle=10000, batch_size=FLAGS.batch_size):
    """Build the training DataLoader over the sharded WebDataset.

    :param img_dim: square crop size for RandomResizedCrop
    :param shuffle: WebDataset shuffle-buffer size, in samples
    :param batch_size: per-replica batch size (note: the default is bound
        to FLAGS.batch_size at definition time)
    :return: DataLoader yielding pre-batched (image, label) pairs
    """
    # Alternative shard sources tried during development (GCS vs. local disk):
    # "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-train-{000000..001281}.tar"
    # "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-train-{000000..001279}.tar"
    # "pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-train-{000000..001281}.tar"
    # "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-320/imagenet-train-{000000..000320}.tar"
    # "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-640/imagenet-train-{000000..000639}.tar"
    # Shards are split first across nodes, then across DataLoader workers,
    # so each (node, worker) pair streams a disjoint subset of the data.
    num_dataset_instances = xm.xrt_world_size() * FLAGS.num_workers
    epoch_size = trainsize // num_dataset_instances
    # num_batches = (epoch_size + batch_size - 1) // batch_size
    # num_batches = epoch_size // batch_size
    image_transform = transforms.Compose(
        [
            transforms.RandomResizedCrop(img_dim),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]
    )
    # Batching happens inside the dataset pipeline (partial=True keeps the
    # trailing short batch), hence batch_size=None on the DataLoader below.
    dataset = (
        wds.WebDataset("pipe:cat /mnt/disks/dataset/webdataset/shards-640/imagenet-train-{000000..000639}.tar", # FLAGS.wds_traindir,
            splitter=my_worker_splitter, nodesplitter=my_node_splitter, shardshuffle=True, length=epoch_size)
        .shuffle(shuffle)
        .decode("pil")
        .to_tuple("ppm;jpg;jpeg;png", "cls")
        .map_tuple(image_transform, identity)
        .batched(batch_size, partial=True)
    )
    loader = torch.utils.data.DataLoader(dataset, batch_size=None, shuffle=False, drop_last=False, num_workers=FLAGS.num_workers) # , worker_init_fn=worker_init_fn
    return loader
def make_val_loader(img_dim, resize_dim, batch_size=FLAGS.test_set_batch_size):
    """Build the validation DataLoader over the sharded WebDataset.

    :param img_dim: square center-crop size fed to the model
    :param resize_dim: size images are resized to before center-cropping
    :param batch_size: per-replica eval batch size (default bound to FLAGS
        at definition time)
    :return: DataLoader yielding pre-batched (image, label) pairs
    """
    # Shards are split first across nodes, then across DataLoader workers.
    num_dataset_instances = xm.xrt_world_size() * FLAGS.num_workers
    epoch_test_size = testsize // num_dataset_instances
    # num_batches = (epoch_size + batch_size - 1) // batch_size
    # num_test_batches = epoch_test_size // batch_size
    val_transform = transforms.Compose(
        [
            transforms.Resize(resize_dim),
            transforms.CenterCrop(img_dim),
            transforms.ToTensor(),
            normalize,
        ]
    )
    # Alternative shard sources tried during development (GCS vs. local disk):
    # "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-320/imagenet-val-{000000..000012}.tar"
    # "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-val-{000000..000049}.tar"
    # "pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-val-{000000..000049}.tar"
    # "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-640/imagenet-val-{000000..000024}.tar"
    # No shuffling for validation; batching again happens in the pipeline.
    val_dataset = (
        wds.WebDataset("pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-val-{000000..000049}.tar", # FLAGS.wds_testdir,
            splitter=my_worker_splitter, nodesplitter=my_node_splitter, shardshuffle=False, length=epoch_test_size)
        .decode("pil")
        .to_tuple("ppm;jpg;jpeg;png", "cls")
        .map_tuple(val_transform, identity)
        .batched(batch_size, partial=True)
    )
    val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=None, shuffle=False, num_workers=FLAGS.num_workers) # , worker_init_fn=worker_init_fn, pin_memory=False
    return val_loader
def train_imagenet():
    """Run the full train/eval loop on this replica's XLA device.

    Builds the WebDataset loaders, the model, optimizer and LR scheduler,
    then alternates one training and one evaluation pass per epoch while
    logging rates and accuracies (mesh-reduced across replicas).

    :return: the best mesh-reduced test accuracy seen over all epochs
    """
    print('==> Preparing data..')
    img_dim = get_model_property('img_dim')
    resize_dim = max(img_dim, 256)
    train_loader = make_train_loader(img_dim, batch_size=FLAGS.batch_size, shuffle=10000)
    test_loader = make_val_loader(img_dim, resize_dim, batch_size=FLAGS.test_set_batch_size)
    torch.manual_seed(42)
    # Keep a reference so the profiler server stays alive for the whole run.
    server = xp.start_server(FLAGS.profiler_port)
    device = xm.xla_device()
    model = get_model_property('model_fn')().to(device)
    # Only the master ordinal writes TensorBoard summaries.
    writer = None
    if xm.is_master_ordinal():
        writer = test_utils.get_summary_writer(FLAGS.logdir)
    optimizer = optim.SGD(
        model.parameters(),
        lr=FLAGS.lr,
        momentum=FLAGS.momentum,
        weight_decay=1e-4)
    num_training_steps_per_epoch = trainsize // (
        FLAGS.batch_size * xm.xrt_world_size())
    lr_scheduler = schedulers.wrap_optimizer_with_scheduler(
        optimizer,
        scheduler_type=getattr(FLAGS, 'lr_scheduler_type', None),
        scheduler_divisor=getattr(FLAGS, 'lr_scheduler_divisor', None),
        scheduler_divide_every_n_epochs=getattr(
            FLAGS, 'lr_scheduler_divide_every_n_epochs', None),
        num_steps_per_epoch=num_training_steps_per_epoch,
        summary_writer=writer)
    loss_fn = nn.CrossEntropyLoss()
    # global_step = 0
    # server = xp.start_server(profiler_port)
    def train_loop_fn(loader, epoch):
        # One training epoch on this replica. Returns the number of samples
        # this replica processed and the mesh-averaged global rate.
        train_steps = trainsize // (FLAGS.batch_size * xm.xrt_world_size())
        tracker = xm.RateTracker()
        total_samples = 0
        rate_list = []
        model.train()
        for step, (data, target) in enumerate(loader): # repeatedly(loader) | enumerate(islice(loader, 0, train_steps))
            # global_step += 1
            optimizer.zero_grad()
            output = model(data)
            loss = loss_fn(output, target)
            loss.backward()
            # xm.optimizer_step marks the step for the XLA graph executor.
            xm.optimizer_step(optimizer)
            tracker.add(FLAGS.batch_size)
            total_samples += data.size()[0]
            # rate_list.append(tracker.rate())
            # replica_rate = tracker.rate()
            # global_rate = tracker.global_rate()
            if lr_scheduler:
                lr_scheduler.step()
            if step % FLAGS.log_steps == 0:
                # Deferred via step closure so logging reads materialized values.
                xm.add_step_closure(
                    _train_update, args=(device, step, loss, tracker, epoch, writer))
                test_utils.write_to_summary(writer, step, dict_to_write={'Rate_step': tracker.rate()}, write_xla_metrics=False)
            if step == train_steps:
                break
        # replica_max_rate = np.max(tracker.rate())
        # Average the per-replica global rates across the device mesh.
        reduced_global = xm.mesh_reduce('reduced_global', tracker.global_rate(), np.mean)
        # reduced_max_rate = xm.mesh_reduce('max_rate', tracker.rate(), np.mean)
        return total_samples, reduced_global
    def test_loop_fn(loader, epoch):
        # One evaluation pass. Returns the mesh-averaged accuracy, this
        # replica's accuracy, and this replica's sample count.
        test_steps = testsize // (FLAGS.test_set_batch_size * xm.xrt_world_size())
        total_samples, correct = 0, 0
        model.eval()
        for step, (data, target) in enumerate(loader): # repeatedly(loader) | enumerate(islice(loader, 0, test_steps)
            output = model(data)
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum()
            total_samples += data.size()[0]
            if step % FLAGS.log_steps == 0:
                xm.add_step_closure(
                    test_utils.print_test_update, args=(device, None, epoch, step))
            if step == test_steps:
                break
        correct_val = correct.item()
        accuracy_replica = 100.0 * correct_val / total_samples
        accuracy = xm.mesh_reduce('test_accuracy', accuracy_replica, np.mean)
        return accuracy, accuracy_replica, total_samples
    # MpDeviceLoader prefetches host batches onto the XLA device.
    train_device_loader = pl.MpDeviceLoader(train_loader, device)
    test_device_loader = pl.MpDeviceLoader(test_loader, device)
    accuracy, max_accuracy = 0.0, 0.0
    training_start_time = time.time()
    for epoch in range(1, FLAGS.num_epochs + 1):
        xm.master_print('Epoch {} train begin {}'.format(
            epoch, test_utils.now()))
        replica_epoch_start = time.time()
        replica_train_samples, reduced_global = train_loop_fn(train_device_loader, epoch)
        replica_epoch_time = time.time() - replica_epoch_start
        avg_epoch_time_mesh = xm.mesh_reduce('epoch_time', replica_epoch_time, np.mean)
        # Scale the mesh-averaged per-replica rate to a fleet-wide rate.
        reduced_global = reduced_global * xm.xrt_world_size()
        xm.master_print('Epoch {} train end {}, Epoch Time={}, Replica Train Samples={}, Reduced GlobalRate={:.2f}'.format(
            epoch, test_utils.now(), str(datetime.timedelta(seconds=avg_epoch_time_mesh)).split('.')[0], replica_train_samples, reduced_global))
        accuracy, accuracy_replica, replica_test_samples = test_loop_fn(test_device_loader, epoch)
        xm.master_print('Epoch {} test end {}, Reduced Accuracy={:.2f}%, Replica Accuracy={:.2f}%, Replica Test Samples={}'.format(
            epoch, test_utils.now(), accuracy, accuracy_replica, replica_test_samples))
        max_accuracy = max(accuracy, max_accuracy)
        test_utils.write_to_summary(
            writer,
            epoch,
            dict_to_write={'Accuracy/test': accuracy,
                           'Global Rate': reduced_global},
            write_xla_metrics=False)
        if FLAGS.metrics_debug:
            xm.master_print(met.metrics_report())
    test_utils.close_summary_writer(writer)
    total_train_time = time.time() - training_start_time
    xm.master_print('Total Train Time: {}'.format(str(datetime.timedelta(seconds=total_train_time)).split('.')[0]))
    xm.master_print('Max Accuracy: {:.2f}%'.format(max_accuracy))
    xm.master_print('Avg. Global Rate: {:.2f} examples per second'.format(reduced_global))
    return max_accuracy
def _mp_fn(index, flags):
    """Per-process entry point for xmp.spawn.

    Installs the parsed flags into this process, runs training, and exits
    with code 21 when the final accuracy misses the configured target.
    """
    global FLAGS
    FLAGS = flags
    torch.set_default_tensor_type('torch.FloatTensor')
    final_accuracy = train_imagenet()
    if final_accuracy < FLAGS.target_accuracy:
        print('Accuracy {} is below target {}'.format(final_accuracy,
                                                      FLAGS.target_accuracy))
        sys.exit(21)
if __name__ == '__main__':
    # Fork one process per core; each runs _mp_fn with the parsed FLAGS.
    xmp.spawn(_mp_fn, args=(FLAGS,), nprocs=FLAGS.num_cores, start_method='fork') # , start_method='spawn'
| test_train_mp_wds_local.py | 15,091 | Split urls_ correctly per accelerator node
:param urls:
:return: slice of urls_
Split urls per worker
Selects a subset of urls based on Torch get_worker_info.
Used as a shard selection function in Dataset.
replaces wds.split_by_worker
import warnings warnings.filterwarnings("ignore") profiler_port=9012 import gcsdataset XLA arg parser import argparse py arg parser parser = argparse.ArgumentParser(description='WebDataset args for modified XLA model') parser.add_argument('--wds_traindir', type=str, default='/tmp/imagenet') parser.add_argument('--wds_testdir', type=str, default='/tmp/imagenet') parser.add_argument('--trainsize', type=int, default=1280000) parser.add_argument('--testsize', type=int, default=50000) wds_args, others = parser.parse_known_args() '--wds_traindir': { 'type': str, 'default':'/tmp/imagenet' }, '--wds_testdir': { 'type': str, 'default': '/tmp/imagenet' }, '--trainsize': { 'type': int, 'default': 1280000 }, '--testsize': { 'type': int, 'default': 50000 }, Override some of the args in DEFAULT_KWARGS, or add them to the dict if they don't exist. Set any args that were not explicitly given by the user. WDS trainsize = 1281167 all shardsFLAGS.trainsize 1280 shards {000...079} FLAGS.testsize train_dir = FLAGS.wds_traindir test_dir = FLAGS.wds_testdir import torch "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-train-{000000..001281}.tar" "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-train-{000000..001279}.tar" "pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-train-{000000..001281}.tar" "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-320/imagenet-train-{000000..000320}.tar" "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-640/imagenet-train-{000000..000639}.tar" num_batches = (epoch_size + batch_size - 1) // batch_size num_batches = epoch_size // batch_size FLAGS.wds_traindir, , worker_init_fn=worker_init_fn num_batches = (epoch_size + batch_size - 1) // batch_size num_test_batches = epoch_test_size // batch_size "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-320/imagenet-val-{000000..000012}.tar" "pipe:gsutil cat 
gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards/imagenet-val-{000000..000049}.tar" "pipe:cat /mnt/disks/dataset/webdataset/shards/imagenet-val-{000000..000049}.tar" "pipe:gsutil cat gs://tpu-demo-eu-west/imagenet-wds/wds-data/shards-640/imagenet-val-{000000..000024}.tar" FLAGS.wds_testdir, , worker_init_fn=worker_init_fn, pin_memory=False global_step = 0 server = xp.start_server(profiler_port) repeatedly(loader) | enumerate(islice(loader, 0, train_steps)) global_step += 1 rate_list.append(tracker.rate()) replica_rate = tracker.rate() global_rate = tracker.global_rate() replica_max_rate = np.max(tracker.rate()) reduced_max_rate = xm.mesh_reduce('max_rate', tracker.rate(), np.mean) repeatedly(loader) | enumerate(islice(loader, 0, test_steps) , start_method='spawn' | 3,141 | en | 0.306259 |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoinlimitededition Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the REST API."""
from test_framework.test_framework import BitcoinlimitededitionTestFramework
from test_framework.util import *
from struct import *
from io import BytesIO
from codecs import encode
import http.client
import urllib.parse
def deser_uint256(f):
    """Read a little-endian 256-bit integer from file-like object *f*.

    Consumes eight consecutive little-endian 32-bit words; word *i*
    contributes at bit offset 32*i.
    """
    words = (unpack(b"<I", f.read(4))[0] for _ in range(8))
    return sum(word << (32 * shift) for shift, word in enumerate(words))
# Helper for simple HTTP GET calls.
def http_get_call(host, port, path, response_object = 0):
    """Issue a GET request; return the raw response object when
    *response_object* is truthy, otherwise the UTF-8 decoded body."""
    connection = http.client.HTTPConnection(host, port)
    connection.request('GET', path)
    response = connection.getresponse()
    if response_object:
        return response
    return response.read().decode('utf-8')
# Helper for simple HTTP POST calls with a request body.
def http_post_call(host, port, path, requestdata = '', response_object = 0):
    """Issue a POST request with *requestdata* as the body; return the raw
    response object when *response_object* is truthy, otherwise the
    undecoded body bytes."""
    connection = http.client.HTTPConnection(host, port)
    connection.request('POST', path, requestdata)
    response = connection.getresponse()
    if response_object:
        return response
    return response.read()
class RESTTest (BitcoinlimitededitionTestFramework):
FORMAT_SEPARATOR = "."
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-rest"]] * self.num_nodes
    def setup_network(self, split=False):
        # Build the framework's default topology, then additionally connect
        # nodes 0 and 2 directly to each other.
        super().setup_network()
        connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
url = urllib.parse.urlparse(self.nodes[0].url)
self.log.info("Mining blocks...")
self.nodes[0].generate(1)
self.sync_all()
self.nodes[2].generate(100)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 50)
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
self.sync_all()
self.nodes[2].generate(1)
self.sync_all()
bb_hash = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbalance(), Decimal("0.1")) #balance now should be 0.1 on node 1
# load the latest 0.1 tx over the REST API
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
#######################################
# GETUTXOS: query an unspent outpoint #
#######################################
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
#make sure there is one utxo
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['utxos'][0]['value'], 0.1)
#################################################
# GETUTXOS: now query an already spent outpoint #
#################################################
json_request = '/checkmempool/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
#check chainTip response
assert_equal(json_obj['chaintipHash'], bb_hash)
        #make sure there is no utxo in the response because this outpoint has been spent
assert_equal(len(json_obj['utxos']), 0)
#check bitmap
assert_equal(json_obj['bitmap'], "0")
##################################################
# GETUTXOS: now check both with the same request #
##################################################
json_request = '/checkmempool/'+txid+'-'+str(n)+'/'+vintx+'-0'
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(len(json_obj['utxos']), 1)
assert_equal(json_obj['bitmap'], "10")
#test binary response
bb_hash = self.nodes[0].getbestblockhash()
binaryRequest = b'\x01\x02'
binaryRequest += hex_str_to_bytes(txid)
binaryRequest += pack("i", n)
binaryRequest += hex_str_to_bytes(vintx)
binaryRequest += pack("i", 0)
bin_response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', binaryRequest)
output = BytesIO()
output.write(bin_response)
output.seek(0)
chainHeight = unpack("i", output.read(4))[0]
hashFromBinResponse = hex(deser_uint256(output))[2:].zfill(64)
assert_equal(bb_hash, hashFromBinResponse) #check if getutxo's chaintip during calculation was fine
assert_equal(chainHeight, 102) #chain height must be 102
############################
# GETUTXOS: mempool checks #
############################
# do a tx and don't sync
txid = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.1)
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+txid+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
vintx = json_obj['vin'][0]['txid'] # get the vin to later check for utxo (should be spent by then)
# get n of 0.1 outpoint
n = 0
for vout in json_obj['vout']:
if vout['value'] == 0.1:
n = vout['n']
json_request = '/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 0) #no outpoint expected: the tx is only in the mempool and /checkmempool was not requested
json_request = '/checkmempool/'+txid+'-'+str(n)
json_string = http_get_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
        assert_equal(len(json_obj['utxos']), 1) #there should be an outpoint because it has just been added to the mempool
#do some invalid requests
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'json', json_request, True)
assert_equal(response.status, 400) #must be a 400 because we send an invalid json request
json_request = '{"checkmempool'
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+self.FORMAT_SEPARATOR+'bin', json_request, True)
assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
response = http_post_call(url.hostname, url.port, '/rest/getutxos/checkmempool'+self.FORMAT_SEPARATOR+'bin', '', True)
assert_equal(response.status, 400) #must be a 400 because we send an invalid bin request
#test limits
json_request = '/checkmempool/'
for x in range(0, 20):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 400) #must be a 400 because we exceeding the limits
json_request = '/checkmempool/'
for x in range(0, 15):
json_request += txid+'-'+str(n)+'/'
json_request = json_request.rstrip("/")
response = http_post_call(url.hostname, url.port, '/rest/getutxos'+json_request+self.FORMAT_SEPARATOR+'json', '', True)
assert_equal(response.status, 200) #must be a 200 because we are within the limits
self.nodes[0].generate(1) #generate block to not affect upcoming tests
self.sync_all()
################
# /rest/block/ #
################
# check binary format
response = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response.status, 200)
assert_greater_than(int(response.getheader('content-length')), 80)
response_str = response.read()
# compare with block header
response_header = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"bin", True)
assert_equal(response_header.status, 200)
assert_equal(int(response_header.getheader('content-length')), 80)
response_header_str = response_header.read()
assert_equal(response_str[0:80], response_header_str)
# check block hex format
response_hex = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_hex.status, 200)
assert_greater_than(int(response_hex.getheader('content-length')), 160)
response_hex_str = response_hex.read()
assert_equal(encode(response_str, "hex_codec")[0:160], response_hex_str[0:160])
# compare with hex block header
response_header_hex = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(response_header_hex.status, 200)
assert_greater_than(int(response_header_hex.getheader('content-length')), 160)
response_header_hex_str = response_header_hex.read()
assert_equal(response_hex_str[0:160], response_header_hex_str[0:160])
assert_equal(encode(response_header_str, "hex_codec")[0:160], response_header_hex_str[0:160])
# check json format
block_json_string = http_get_call(url.hostname, url.port, '/rest/block/'+bb_hash+self.FORMAT_SEPARATOR+'json')
block_json_obj = json.loads(block_json_string)
assert_equal(block_json_obj['hash'], bb_hash)
# compare with json block header
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/1/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str, parse_float=Decimal)
assert_equal(len(json_obj), 1) #ensure that there is one header in the json response
assert_equal(json_obj[0]['hash'], bb_hash) #request/response hash should be the same
#compare with normal RPC block response
rpc_block_json = self.nodes[0].getblock(bb_hash)
assert_equal(json_obj[0]['hash'], rpc_block_json['hash'])
assert_equal(json_obj[0]['confirmations'], rpc_block_json['confirmations'])
assert_equal(json_obj[0]['height'], rpc_block_json['height'])
assert_equal(json_obj[0]['version'], rpc_block_json['version'])
assert_equal(json_obj[0]['merkleroot'], rpc_block_json['merkleroot'])
assert_equal(json_obj[0]['time'], rpc_block_json['time'])
assert_equal(json_obj[0]['nonce'], rpc_block_json['nonce'])
assert_equal(json_obj[0]['bits'], rpc_block_json['bits'])
assert_equal(json_obj[0]['difficulty'], rpc_block_json['difficulty'])
assert_equal(json_obj[0]['chainwork'], rpc_block_json['chainwork'])
assert_equal(json_obj[0]['previousblockhash'], rpc_block_json['previousblockhash'])
#see if we can get 5 headers in one response
self.nodes[1].generate(5)
self.sync_all()
response_header_json = http_get_call(url.hostname, url.port, '/rest/headers/5/'+bb_hash+self.FORMAT_SEPARATOR+"json", True)
assert_equal(response_header_json.status, 200)
response_header_json_str = response_header_json.read().decode('utf-8')
json_obj = json.loads(response_header_json_str)
assert_equal(len(json_obj), 5) #now we should have 5 header objects
# do tx test
tx_hash = block_json_obj['tx'][0]['txid']
json_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"json")
json_obj = json.loads(json_string)
assert_equal(json_obj['txid'], tx_hash)
# check hex format response
hex_string = http_get_call(url.hostname, url.port, '/rest/tx/'+tx_hash+self.FORMAT_SEPARATOR+"hex", True)
assert_equal(hex_string.status, 200)
assert_greater_than(int(response.getheader('content-length')), 10)
# check block tx details
# let's make 3 tx and mine them on node 1
txs = []
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
txs.append(self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11))
self.sync_all()
# check that there are exactly 3 transactions in the TX memory pool before generating the block
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/info'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
assert_equal(json_obj['size'], 3)
# the size of the memory pool should be greater than 3x ~100 bytes
assert_greater_than(json_obj['bytes'], 300)
# check that there are our submitted transactions in the TX memory pool
json_string = http_get_call(url.hostname, url.port, '/rest/mempool/contents'+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for i, tx in enumerate(txs):
assert_equal(tx in json_obj, True)
assert_equal(json_obj[tx]['spentby'], txs[i+1:i+2])
assert_equal(json_obj[tx]['depends'], txs[i-1:i])
# now mine the transactions
newblockhash = self.nodes[1].generate(1)
self.sync_all()
#check if the 3 tx show up in the new block
json_string = http_get_call(url.hostname, url.port, '/rest/block/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in json_obj['tx']:
if not 'coinbase' in tx['vin'][0]: #exclude coinbase
assert_equal(tx['txid'] in txs, True)
#check the same but without tx details
json_string = http_get_call(url.hostname, url.port, '/rest/block/notxdetails/'+newblockhash[0]+self.FORMAT_SEPARATOR+'json')
json_obj = json.loads(json_string)
for tx in txs:
assert_equal(tx in json_obj['tx'], True)
#test rest bestblock
bb_hash = self.nodes[0].getbestblockhash()
json_string = http_get_call(url.hostname, url.port, '/rest/chaininfo.json')
json_obj = json.loads(json_string)
assert_equal(json_obj['bestblockhash'], bb_hash)
if __name__ == '__main__':
    # Entry point: run the REST interface functional test.
    RESTTest().main()
| test/functional/interface_rest.py | 15,219 | Test the REST API.
!/usr/bin/env python3 Copyright (c) 2014-2017 The Bitcoinlimitededition Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.allows simple http get callsallows simple http post calls with a request bodybalance now should be 0.1 on node 1 load the latest 0.1 tx over the REST API get the vin to later check for utxo (should be spent by then) get n of 0.1 outpoint GETUTXOS: query an unspent outpoint check chainTip responsemake sure there is one utxo GETUTXOS: now query an already spent outpoint check chainTip responsemake sure there is no utxo in the response because this oupoint has been spentcheck bitmap GETUTXOS: now check both with the same request test binary responsecheck if getutxo's chaintip during calculation was finechain height must be 102 GETUTXOS: mempool checks do a tx and don't sync get the vin to later check for utxo (should be spent by then) get n of 0.1 outpointthere should be an outpoint because it has just added to the mempoolthere should be an outpoint because it has just added to the mempooldo some invalid requestsmust be a 400 because we send an invalid json requestmust be a 400 because we send an invalid bin requestmust be a 400 because we send an invalid bin requesttest limitsmust be a 400 because we exceeding the limitsmust be a 200 because we are within the limitsgenerate block to not affect upcoming tests /rest/block/ check binary format compare with block header check block hex format compare with hex block header check json format compare with json block headerensure that there is one header in the json responserequest/response hash should be the samecompare with normal RPC block responsesee if we can get 5 headers in one responsenow we should have 5 header objects do tx test check hex format response check block tx details let's make 3 tx and mine them on node 1 check that there are exactly 3 transactions in the TX memory pool before generating 
the block the size of the memory pool should be greater than 3x ~100 bytes check that there are our submitted transactions in the TX memory pool now mine the transactionscheck if the 3 tx show up in the new blockexclude coinbasecheck the same but without tx detailstest rest bestblock | 2,305 | en | 0.788556 |
import random
import turtle
import time
def menu():
    """Show the start prompt; exit the program unless the player answers YES.

    Any answer other than 'NO'/'QUIT'/'YES' falls through and the game
    starts anyway (original behaviour preserved).
    """
    x = input('would you like to start the game? \n (YES/NO) \n would you like to quit the menu bar? \n (QUIT) \n *PLEASE USE CAPITAL LETTERS \n YOUR ANSWER: ')
    if x == 'NO' or x == 'QUIT':
        quit()
    elif x == 'YES':
        print('')


# Show the menu once at startup.
menu()
# --- player gender selection (chooses which sprite set is used) ---
print('are you MALE/FEMALE ? ')
print('*PLEASE USE CAPITAL LETTERS')
gender = input('ANSWER:')
# --- random box sprite and background selection ---
box_color_list = ["box1.gif", "box2.gif", "box3.gif", "box4.gif", "box5.gif"]
background_list = ["background1.gif", "background2.gif", "background3.gif", "background4.gif"]
randombox = random.randint (0, len(box_color_list)-1)
this_box = box_color_list[randombox]
box = turtle.clone()
turtle.register_shape(this_box)
box.shape(this_box)
background = random.randint (0,4)  # NOTE(review): unused; randbackground below is what selects the image
screen = turtle.Screen()
randbackground = random.randint (0,len(background_list)-1)
this_background = background_list [randbackground]
turtle.register_shape(this_background)
turtle.bgpic (this_background)
turtle.tracer(1, 0)
# turtle2 is an invisible pen used only to display the score text.
turtle2 = turtle.clone()
score = 0
turtle2.write(str(score))
turtle2.ht()
turtle.penup()
#bird = turtle.clone()
#turtle.addshape('bird.gif')
#bird.shape('bird.gif')
turtle.shape('circle')
#turtle.hideturtle()
turtle.Screen()
turtle.fillcolor('white')
screen = turtle.Screen()
screen.bgcolor('light blue')
# Player starts at the bottom of the 400x400 playfield.
turtle.goto(0,-200)
# --- bookkeeping lists for food, boxes and their stamps ---
good_food_pos= []
boxes_list = []
bad_food_pos = []
good_food_stamps = []
bad_food_stamps = []
box_stamps = []
box_pos=[]
bird_pos=[]
turtles_list = []
# --- playfield geometry and key/timer constants ---
SIZE_X = 400
SIZE_Y = 400
turtle.setup(500,500)
player_size = 10
my_pos = turtle.pos()
x_pos = my_pos[0]
y_pos = my_pos[1]
UP_EDGE = 200
DOWN_EDGE = -200
RIGHT_EDGE = 200
LEFT_EDGE = -200
UP_ARROW = 'Up'
LEFT_ARROW = 'Left'
DOWN_ARROW = 'Down'
RIGHT_ARROW = 'Right'
TIME_STEP = 100
TIME_STEP2 = 10000
SPACEBAR = 'space'
def move_player():
    """Wrap the player around the horizontal edges and resolve food collisions.

    Eating good food: +1 score, respawn good food.
    Eating bad food: -1 score; at -5 the game ends. Respawn bad food.
    """
    global food, score
    my_pos = turtle.pos()
    x_pos = my_pos[0]
    y_pos = my_pos[1]
    # Computed but unused in the original; kept for parity.
    x_ok = LEFT_EDGE <= x_pos <= RIGHT_EDGE
    y_ok = UP_EDGE >= y_pos >= DOWN_EDGE
    within_bounds = x_ok and y_ok
    # Screen wrap: stepping onto one horizontal edge teleports to the other.
    if turtle.pos()[0] == RIGHT_EDGE:
        turtle.goto(LEFT_EDGE + 20, turtle.pos()[1])
    if turtle.pos()[0] == LEFT_EDGE:
        turtle.goto(RIGHT_EDGE - 20, turtle.pos()[1])
    if turtle.pos() in good_food_pos:
        # Remove the eaten stamp and its bookkeeping entry.
        good_food_ind = good_food_pos.index(turtle.pos())
        food.clearstamp(good_food_stamps[good_food_ind])
        good_food_stamps.pop(good_food_ind)
        good_food_pos.pop(good_food_ind)
        print('EATEN GOOD FOOD!')
        score = score + 1
        turtle2.clear()
        turtle2.write(str(score))
        good_food()
    if turtle.pos() in bad_food_pos:
        bad_food_ind = bad_food_pos.index(turtle.pos())
        bad_food.clearstamp(bad_food_stamps[bad_food_ind])
        bad_food_stamps.pop(bad_food_ind)
        bad_food_pos.pop(bad_food_ind)
        print('EATEN BAD FOOD!')
        score = score - 1
        turtle2.clear()
        turtle2.write(str(score))
        if score == -5:
            print('GAME OVER!')
            quit()
        bad_food1()
# Direction codes shared by the key handlers.
UP = 0
LEFT = 1
DOWN = 2
RIGHT = 3
direction = DOWN
# Register both facing sprites for each gender up front.
turtle.register_shape('man_right.gif')
turtle.register_shape('man_left.gif')
turtle.register_shape('woman_right.gif')
turtle.register_shape('woman_left.gif')
# Start facing right with the sprite matching the chosen gender.
if gender == "MALE":
    turtle.shape('man_right.gif')
else:
    turtle.shape('woman_right.gif')
def left():
    """Left-arrow handler: face left (gender-matched sprite) and step."""
    global direction
    direction = LEFT
    if gender == "MALE":
        turtle.shape('man_left.gif')
    else:
        turtle.shape('woman_left.gif')
    move_player()
    print('you pressed the left key')
def right():
    """Right-arrow handler: face right (gender-matched sprite) and step."""
    global direction
    direction = RIGHT
    if gender == "MALE":
        turtle.shape('man_right.gif')
    else:
        turtle.shape('woman_right.gif')
    move_player()
    print('you pressed the right key')


# Bind the arrow keys and start listening for key events.
turtle.onkeypress(left, LEFT_ARROW)
turtle.onkeypress(right, RIGHT_ARROW)
turtle.listen()
# Turtle used to stamp good (green) food squares; kept hidden, only stamps show.
good_pos = (0,0) ##
food = turtle.clone()
food.shape('square')
food.fillcolor('green')
food.hideturtle()
def good_food():
    """Stamp a new good-food square at a random x on the player's current row."""
    min_x = -int(SIZE_X / 2 / player_size) + 1
    max_x = int(SIZE_X / 2 / player_size) - 1
    # Snap to the player_size grid so collisions by exact position work.
    food_x = random.randint(min_x, max_x) * player_size
    food.goto(food_x, turtle.pos()[1])
    good_food_pos.append(food.pos())
    stampnew = food.stamp()
    good_food_stamps.append(stampnew)
def create_box():
    """Spawn one red falling box at a random grid x at the top of the screen.

    Also triggers chose_number() which spawns extra decorative boxes.
    """
    global y_pos, box, SIZE_X, player_size
    top_y = 300
    min_x = -int(SIZE_X / 2 / player_size) + 1
    max_x = int(SIZE_X / 2 / player_size) - 1
    x = random.randint(min_x, max_x) * player_size
    turtles_list.append(turtle.clone())
    turtles_list[-1].hideturtle()
    turtles_list[-1].shape("square")
    turtles_list[-1].fillcolor('red')
    # Original re-randomized x and moved the box a second time; one placement
    # is equivalent for the visible result, so the duplicate was removed.
    turtles_list[-1].goto(x, top_y)
    turtles_list[-1].showturtle()
    chose_number()
# Tick counter for the fall timer; every 100 ticks more boxes are spawned.
count = 0


def fall():
    """Timer callback: move boxes above the player down, spawn more over time.

    NOTE(review): the original file's indentation was lost; the placement of
    the per-tick counter after the loop is reconstructed — confirm intent.
    """
    global turtles_list, top_y, x_pos, turtle, count
    for my_clone in turtles_list:
        x1 = my_clone.pos()[0]
        y1 = my_clone.pos()[1]
        if y1 > turtle.pos()[1]:
            y1 = y1 - 25
            my_clone.goto(x1, y1)
    count += 1
    print(count)
    if count % 100 == 0:
        num_box = count // 100
        for i in range(num_box):
            create_box()
    # Re-arm the timer so boxes keep falling.
    turtle.ontimer(fall, TIME_STEP)
def jump():
    """Move the player up by 20 when facing UP, snap onto a box if landed on one.

    NOTE(review): `y1` and `my_clone` are read from module scope here and may
    be unbound depending on call order — pre-existing behaviour, confirm.
    """
    global direction, x_pos, y_pos, my_pos, y1
    if direction == UP:
        turtle.goto(turtle.pos()[0], turtle.pos()[1] + 20)
    for my_turtle in turtles_list:
        # Original repeated this identical check twice; once is equivalent.
        if turtle.pos() == my_turtle.pos():
            turtle.goto(turtle.pos()[0], y1)
    if not turtle.pos() == my_clone.pos():
        turtle.goto(turtle.pos()[0], turtle.pos()[1] - 20)
def chose_number():
    """Spawn 1-3 extra square turtles and scatter ALL boxes along the top row.

    NOTE(review): the second loop repositions every box in boxes_list, not
    just the new ones — pre-existing behaviour, confirm intent.
    """
    number_of_boxes = random.randint(1, 3)
    for i in range(number_of_boxes):
        x5 = turtle.clone()
        x5.shape("square")
        boxes_list.append(x5)
    for g in boxes_list:
        g.goto(random.randint(-200, 200), 200)
# Turtle used to stamp bad (black) food squares; kept hidden, only stamps show.
bad_pos = (0,0)
bad_food = turtle.clone()
bad_food.shape('square')
bad_food.fillcolor('black')
bad_food.hideturtle()
def bad_food1():
    """Stamp a new bad-food square at a random grid x on the player's start row."""
    global SIZE_X, player_size, y_pos, bad_food
    min_x = -int(SIZE_X / 2 / player_size) + 1
    max_x = int(SIZE_X / 2 / player_size) - 1
    bad_food_x = random.randint(min_x, max_x) * player_size
    bad_food.goto(bad_food_x, y_pos)
    bad_food_pos.append(bad_food.pos())
    bad_stamp_new = bad_food.stamp()
    bad_food_stamps.append(bad_stamp_new)
# --- game bootstrap: spawn initial food/boxes and start the fall timer ---
my_clone = turtle.clone()
my_clone.ht()
bad_food1()
good_food()
move_player()
create_box()
fall()
# One-shot loss check at startup (box_pos is never populated in this file —
# NOTE(review): likely vestigial, confirm).
if turtle.pos() in box_pos:
    print("YOU LOST !")
    quit()
| jennifer & Laith/laith_eatit.py | 8,751 | listsbird = turtle.clone()turtle.addshape('bird.gif')bird.shape('bird.gif')turtle.hideturtle()''' if x_pos >= RIGHT_EDGE: turtle.goto(RIGHT_EDGE - 10, y_pos) if x_pos <= LEFT_EDGE: turtle.goto(LEFT_EDGE + 10, y_pos) if y_pos >= UP_EDGE: turtle.goto(x_pos, UP_EDGE + 10)''' if within_bounds: if direction == RIGHT: turtle.goto(x_pos + 10,y_pos) elif direction == LEFT: turtle.goto(x_pos - 10,y_pos) elif direction == UP: turtle.goto(x_pos, y_pos +10) if turtle.pos() == my_clone.pos(): ''' else: x checks right edge check if x_pos >= RIGHT_EDGE: if direction == LEFT: turtle.goto(x_pos - 1,y_pos) if x_pos <= LEFT_EDGE: if direction == RIGHT: turtle.goto(x_pos + 1,y_pos) if y_pos >= UP_EDGE: if direction == RIGHT: turtle.goto(x_pos + 10,y_pos) elif direction == LEFT: turtle.goto(x_pos - 10, y_pos) elif direction == DOWN: turtle.goto(x_pos, y_pos -10) if y_pos <= DOWN_EDGE: if direction == RIGHT: turtle.goto(x_pos + 10,y_pos) elif direction == LEFT: turtle.goto(x_pos - 10, y_pos) elif direction == UP: turtle.goto(x_pos, y_pos + 10) ''' turtle.ontimer(move_player,TIME_STEP)stamp_old = food_stamps[-1]box.goto(x,y_pos)box.goto(x,260)box.addshape('box.gif')box.shape('box.gif')all_way = 510x1 = x_posfor num_box in :create_box()turtle.ontimer(create_box,TIME_STEP2)stamp_old = food_stamps[-1] | 1,730 | en | 0.222785 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ['baidu_download']
from ..common import *
from .embed import *
from .universal import *
def baidu_get_song_data(sid):
    """Fetch song metadata for song id *sid* from Baidu's fmlink API.

    Returns the first songList entry, or None when the API signals the
    request came from outside mainland China (empty xcode).
    """
    data = json.loads(get_html(
        'http://music.baidu.com/data/music/fmlink?songIds=%s' % sid, faker=True))['data']
    if data['xcode'] != '':
        # inside china mainland
        return data['songList'][0]
    else:
        # outside china mainland
        return None
def baidu_get_song_url(data):
    """Return the direct song download link from an fmlink entry."""
    return data['songLink']


def baidu_get_song_artist(data):
    """Return the artist name from an fmlink entry."""
    return data['artistName']


def baidu_get_song_album(data):
    """Return the album name from an fmlink entry."""
    return data['albumName']


def baidu_get_song_title(data):
    """Return the song title from an fmlink entry."""
    return data['songName']


def baidu_get_song_lyric(data):
    """Return the absolute lyric URL, or None when no lyric link is present."""
    lrc = data['lrcLink']
    return "http://music.baidu.com%s" % lrc if lrc else None
def baidu_download_song(sid, output_dir='.', merge=True, info_only=False):
    """Download one song (and its lyric, if any) by Baidu song id.

    Falls back to scraping the song page when the fmlink API is unavailable
    (e.g. outside mainland China); in that case no lyric is fetched.
    """
    data = baidu_get_song_data(sid)
    lrc = None  # fix: was unbound (NameError) in the fallback branch below
    if data is not None:
        url = baidu_get_song_url(data)
        title = baidu_get_song_title(data)
        artist = baidu_get_song_artist(data)
        album = baidu_get_song_album(data)
        lrc = baidu_get_song_lyric(data)
        file_name = "%s - %s - %s" % (title, album, artist)
    else:
        html = get_html("http://music.baidu.com/song/%s" % sid)
        url = r1(r'data_url="([^"]+)"', html)
        title = r1(r'data_name="([^"]+)"', html)
        file_name = title
    type, ext, size = url_info(url, faker=True)
    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([url], file_name, ext, size,
                      output_dir, merge=merge, faker=True)
    if lrc:
        # Lyric download is best-effort; a broken lyric link must not abort
        # the song download that already succeeded.
        try:
            type, ext, size = url_info(lrc, faker=True)
            print_info(site_info, title, type, size)
            if not info_only:
                download_urls([lrc], file_name, ext, size, output_dir, faker=True)
        except Exception:
            pass
def baidu_download_album(aid, output_dir='.', merge=True, info_only=False):
    """Download every track (and lyric) of a Baidu album into
    '<output_dir>/<artist> - <album>' with zero-padded track numbers."""
    html = get_html('http://music.baidu.com/album/%s' % aid, faker=True)
    album_name = r1(r'<h2 class="album-name">(.+?)<\/h2>', html)
    artist = r1(r'<span class="author_list" title="(.+?)">', html)
    output_dir = '%s/%s - %s' % (output_dir, artist, album_name)
    # The data-adddata attribute holds HTML-escaped JSON; undo the escaping.
    ids = json.loads(r1(r'<span class="album-add" data-adddata=\'(.+?)\'>',
                        html).replace('&quot', '').replace(';', '"'))['ids']
    track_nr = 1
    for id in ids:
        song_data = baidu_get_song_data(id)
        song_url = baidu_get_song_url(song_data)
        song_title = baidu_get_song_title(song_data)
        song_lrc = baidu_get_song_lyric(song_data)
        file_name = '%02d.%s' % (track_nr, song_title)
        type, ext, size = url_info(song_url, faker=True)
        print_info(site_info, song_title, type, size)
        if not info_only:
            download_urls([song_url], file_name, ext, size,
                          output_dir, merge=merge, faker=True)
        if song_lrc:
            type, ext, size = url_info(song_lrc, faker=True)
            print_info(site_info, song_title, type, size)
            if not info_only:
                download_urls([song_lrc], file_name, ext,
                              size, output_dir, faker=True)
        track_nr += 1
def baidu_download(url, output_dir='.', stream_type=None, merge=True, info_only=False, **kwargs):
    """Dispatch a Baidu URL to the matching downloader.

    Supported: pan.baidu.com shares, music.baidu.com albums/songs, and
    tieba.baidu.com posts (embedded videos, inline videos, or images).
    """
    if re.match(r'https?://pan.baidu.com', url):
        real_url, title, ext, size = baidu_pan_download(url)
        print_info('BaiduPan', title, ext, size)
        if not info_only:
            print('Hold on...')
            time.sleep(5)
            download_urls([real_url], title, ext, size,
                          output_dir, url, merge=merge, faker=True)
    elif re.match(r'http://music.baidu.com/album/\d+', url):
        id = r1(r'http://music.baidu.com/album/(\d+)', url)
        baidu_download_album(id, output_dir, merge, info_only)
    elif re.match(r'http://music.baidu.com/song/\d+', url):
        id = r1(r'http://music.baidu.com/song/(\d+)', url)
        baidu_download_song(id, output_dir, merge, info_only)
    elif re.match(r'http://tieba.baidu.com/', url):
        try:
            # embedded videos
            embed_download(url, output_dir, merge=merge, info_only=info_only, **kwargs)
        except Exception:
            # fall back to inline videos and images
            html = get_html(url)
            title = r1(r'title:"([^"]+)"', html)
            vhsrc = re.findall(r'"BDE_Image"[^>]+src="([^"]+\.mp4)"', html) or \
                re.findall(r'vhsrc="([^"]+)"', html)
            if len(vhsrc) > 0:
                ext = 'mp4'
                size = url_size(vhsrc[0])
                print_info(site_info, title, ext, size)
                if not info_only:
                    download_urls(vhsrc, title, ext, size,
                                  output_dir=output_dir, merge=False)
            items = re.findall(
                r'//imgsrc.baidu.com/forum/w[^"]+/([^/"]+)', html)
            urls = ['http://imgsrc.baidu.com/forum/pic/item/' + i
                    for i in set(items)]
            # handle albums
            kw = r1(r'kw=([^&]+)', html) or r1(r"kw:'([^']+)'", html)
            tid = r1(r'tid=(\d+)', html) or r1(r"tid:'([^']+)'", html)
            album_url = 'http://tieba.baidu.com/photo/g/bw/picture/list?kw=%s&tid=%s&pe=%s' % (kw, tid, 1000)
            album_info = json.loads(get_content(album_url))
            for i in album_info['data']['pic_list']:
                urls.append(
                    'http://imgsrc.baidu.com/forum/pic/item/' + i['pic_id'] + '.jpg')
            ext = 'jpg'
            size = float('Inf')
            print_info(site_info, title, ext, size)
            if not info_only:
                download_urls(urls, title, ext, size,
                              output_dir=output_dir, merge=False)
def baidu_pan_download(url):
    """Resolve a pan.baidu.com share URL to a direct download link.

    Returns (real_url, title, ext, size). Raises AssertionError when the
    share is gone or the server refuses to provide a link.
    """
    errno_patt = r'errno":([^"]+),'
    refer_url = ""
    fake_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'UTF-8,*;q=0.5',
        'Accept-Encoding': 'gzip,deflate,sdch',
        'Accept-Language': 'en-US,en;q=0.8',
        'Host': 'pan.baidu.com',
        'Origin': 'http://pan.baidu.com',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2500.0 Safari/537.36',
        'Referer': refer_url
    }
    if cookies:
        print('Use user specified cookies')
    else:
        print('Generating cookies...')
        fake_headers['Cookie'] = baidu_pan_gen_cookies(url)
        refer_url = "http://pan.baidu.com"
    html = get_content(url, fake_headers, decoded=True)
    isprotected = False
    sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
        html)
    if sign == None:
        if re.findall(r'\baccess-code\b', html):
            # Password-protected share: prompt the user and retry.
            isprotected = True
            sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, fake_headers, psk = baidu_pan_protected_share(
                url)
        if isprotected != True:
            raise AssertionError("Share not found or canceled: %s" % url)
    if bdstoken == None:
        bdstoken = ""
    if isprotected != True:
        sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
            html)
    # fix: the query string was mojibake ("...sign=%s×tamp=%s...") — the
    # HTML entity "&times" had been decoded; restore the literal "&timestamp".
    request_url = "http://pan.baidu.com/api/sharedownload?sign=%s&timestamp=%s&bdstoken=%s&channel=chunlei&clienttype=0&web=1&app_id=%s" % (
        sign, timestamp, bdstoken, appid)
    refer_url = url
    post_data = {
        'encrypt': 0,
        'product': 'share',
        'uk': uk,
        'primaryid': primary_id,
        'fid_list': '[' + fs_id + ']'
    }
    if isprotected == True:
        post_data['sekey'] = psk
    response_content = post_content(request_url, fake_headers, post_data, True)
    errno = match1(response_content, errno_patt)
    if errno != "0":
        raise AssertionError(
            "Server refused to provide download link! (Errno:%s)" % errno)
    real_url = r1(r'dlink":"([^"]+)"', response_content).replace('\\/', '/')
    title = r1(r'server_filename":"([^"]+)"', response_content)
    assert real_url
    type, ext, size = url_info(real_url, faker=True)
    # The filename arrives JSON-escaped; round-trip through json to unescape.
    title_wrapped = json.loads('{"wrapper":"%s"}' % title)
    title = title_wrapped['wrapper']
    logging.debug(real_url)
    return real_url, title, ext, size
def baidu_pan_parse(html):
    """Extract share parameters from a pan.baidu.com page.

    Returns (sign, timestamp, bdstoken, appid, primary_id, fs_id, uk);
    any field missing from the page comes back as None.
    """
    sign_patt = r'sign":"([^"]+)"'
    timestamp_patt = r'timestamp":([^"]+),'
    appid_patt = r'app_id":"([^"]+)"'
    bdstoken_patt = r'bdstoken":"([^"]+)"'
    fs_id_patt = r'fs_id":([^"]+),'
    uk_patt = r'uk":([^"]+),'
    errno_patt = r'errno":([^"]+),'
    primary_id_patt = r'shareid":([^"]+),'
    sign = match1(html, sign_patt)
    timestamp = match1(html, timestamp_patt)
    appid = match1(html, appid_patt)
    bdstoken = match1(html, bdstoken_patt)
    fs_id = match1(html, fs_id_patt)
    uk = match1(html, uk_patt)
    primary_id = match1(html, primary_id_patt)
    return sign, timestamp, bdstoken, appid, primary_id, fs_id, uk
def baidu_pan_gen_cookies(url, post_data=None):
    """Visit pan.baidu.com (optionally POSTing *post_data* to *url*) and
    return the collected cookies as a single 'k=v;k=v' header string."""
    from http import cookiejar
    cookiejar = cookiejar.CookieJar()
    opener = request.build_opener(request.HTTPCookieProcessor(cookiejar))
    resp = opener.open('http://pan.baidu.com')
    if post_data is not None:
        resp = opener.open(url, bytes(parse.urlencode(post_data), 'utf-8'))
    return cookjar2hdr(cookiejar)
def baidu_pan_protected_share(url):
    """Unlock a password-protected pan.baidu.com share interactively.

    Prompts the user for the password, verifies it against the share's
    verify endpoint, then parses the unlocked page. Returns the same tuple
    as baidu_pan_parse plus the prepared headers and the BDCLND psk token.
    Raises AssertionError on a wrong password.
    """
    print('This share is protected by password!')
    inpwd = input('Please provide unlock password: ')
    inpwd = inpwd.replace(' ', '').replace('\t', '')
    print('Please wait...')
    post_pwd = {
        'pwd': inpwd,
        'vcode': None,
        'vstr': None
    }
    from http import cookiejar
    import time
    cookiejar = cookiejar.CookieJar()
    opener = request.build_opener(request.HTTPCookieProcessor(cookiejar))
    resp = opener.open('http://pan.baidu.com')
    resp = opener.open(url)
    init_url = resp.geturl()
    verify_url = 'http://pan.baidu.com/share/verify?%s&t=%s&channel=chunlei&clienttype=0&web=1' % (
        init_url.split('?', 1)[1], int(time.time()))
    refer_url = init_url
    fake_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'UTF-8,*;q=0.5',
        'Accept-Encoding': 'gzip,deflate,sdch',
        'Accept-Language': 'en-US,en;q=0.8',
        'Host': 'pan.baidu.com',
        'Origin': 'http://pan.baidu.com',
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:13.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2500.0 Safari/537.36',
        'Referer': refer_url
    }
    opener.addheaders = dict2triplet(fake_headers)
    pwd_resp = opener.open(verify_url, bytes(
        parse.urlencode(post_pwd), 'utf-8'))
    pwd_resp_str = ungzip(pwd_resp.read()).decode('utf-8')
    pwd_res = json.loads(pwd_resp_str)
    if pwd_res['errno'] != 0:
        raise AssertionError(
            'Server returned an error: %s (Incorrect password?)' % pwd_res['errno'])
    pg_resp = opener.open('http://pan.baidu.com/share/link?%s' %
                          init_url.split('?', 1)[1])
    content = ungzip(pg_resp.read()).decode('utf-8')
    sign, timestamp, bdstoken, appid, primary_id, fs_id, uk = baidu_pan_parse(
        content)
    # The BDCLND cookie carries the session key proving the password check.
    psk = query_cookiejar(cookiejar, 'BDCLND')
    psk = parse.unquote(psk)
    fake_headers['Cookie'] = cookjar2hdr(cookiejar)
    return sign, timestamp, bdstoken, appid, primary_id, fs_id, uk, fake_headers, psk
def cookjar2hdr(cookiejar):
    """Serialize a cookie jar into a 'name=value;name=value' header string."""
    cookie_str = ''
    for i in cookiejar:
        cookie_str = cookie_str + i.name + '=' + i.value + ';'
    # Drop the trailing ';' added by the loop.
    return cookie_str[:-1]


def query_cookiejar(cookiejar, name):
    """Return the value of the first cookie called *name*, or None."""
    for i in cookiejar:
        if i.name == name:
            return i.value


def dict2triplet(dictin):
    """Convert a header dict into the (key, value) pair list that
    urllib opener.addheaders expects."""
    out_triplet = []
    for i in dictin:
        out_triplet.append((i, dictin[i]))
    return out_triplet
# Extractor registration: name shown in output, plus the entry points the
# you-get framework looks up on this module.
site_info = "Baidu.com"
download = baidu_download
download_playlist = playlist_not_supported("baidu")
| src/you_get/extractors/baidu.py | 12,107 | !/usr/bin/env python -*- coding: utf-8 -*- inside china mainland outside china mainland embedded videos images handle albums raise NotImplementedError("Password required!") | 172 | en | 0.528254 |
# coding: utf-8
"""
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import dlxapi
from dlxapi.models.field_description_updated_event import FieldDescriptionUpdatedEvent # noqa: E501
from dlxapi.rest import ApiException
class TestFieldDescriptionUpdatedEvent(unittest.TestCase):
    """FieldDescriptionUpdatedEvent unit test stubs (generated scaffold)."""

    def setUp(self):
        # No fixtures needed yet.
        pass

    def tearDown(self):
        # No cleanup needed yet.
        pass

    def testFieldDescriptionUpdatedEvent(self):
        """Test FieldDescriptionUpdatedEvent"""
        # FIXME: construct object with mandatory attributes with example values
        # model = dlxapi.models.field_description_updated_event.FieldDescriptionUpdatedEvent()  # noqa: E501
        pass
if __name__ == '__main__':
    unittest.main()
| python/test/test_field_description_updated_event.py | 1,021 | FieldDescriptionUpdatedEvent unit test stubs
Test FieldDescriptionUpdatedEvent
Decision Lens API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: E501 FIXME: construct object with mandatory attributes with example values model = dlxapi.models.field_description_updated_event.FieldDescriptionUpdatedEvent() noqa: E501 | 501 | en | 0.495645 |
#!/usr/bin/env python
import jinja2
import os
import re
import shlex
import sys
import mkdocs.build
from mkdocs.build import build
from mkdocs.config import load_config
from urllib2 import urlopen
import subprocess
def line_containing(lines, text):
    """Return the index of the first line containing *text*, case-insensitively.

    Raises Exception when no line matches.
    """
    for i, line in enumerate(lines):
        if text.lower() in line.lower():
            return i
    raise Exception("could not find {}".format(text))
# Wrap some functions to allow custom commands in markdown.
# Keep a reference to the real converter so our wrapper can delegate to it.
convert_markdown_original = mkdocs.build.convert_markdown
def convert_markdown_new(source, **kwargs):
    """Expand custom [[...]] directives in markdown, then run the real converter.

    Directives: .import (inline external markdown), .run (embed a shell
    command and its output), .code/.doc (embed a source region by line
    numbers or marker text). NOTE: this file is Python 2 (urllib2,
    print statements); Py2 syntax is preserved.
    """
    def expand(match):
        args = shlex.split(match.groups()[0])
        # Import external markdown
        if args[0] == ".import":
            code = ""
            try:  # Try as a URL
                code = urlopen(args[1]).read()
            except ValueError:  # invalid URL, try as a file
                code = open(args[1]).read()
            return code
        # Run a shell command
        elif args[0] == ".run":
            result = ""
            command = "$ " + match.groups()[0].replace(".run", "").strip()
            try:
                result = subprocess.check_output(args[1:], stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError, e:
                result = e.output
            return "```\n" + command + "\n" + result.strip() + "\n```"
        # Source code embeds
        elif args[0] == ".code" or args[0] == ".doc":
            code = ""
            try:  # Try as a URL
                code = urlopen(args[1]).read()
            except ValueError:  # invalid URL, try as a file
                code = open("../" + args[1]).read()
            lines = code.splitlines()
            # Short hand for specifying a region: [[.code f REGION]] means
            # from the line containing "START REGION" to "END REGION".
            if len(args) == 3:
                region = args[2]
                args[2] = "START " + region
                args.append("END " + region)
            if len(args) == 4:
                start = 1
                end = len(lines) - 1
                try:
                    if args[2].isdigit():
                        start = int(args[2])
                    else:
                        start = line_containing(lines, args[2]) + 1
                    if args[3].isdigit():
                        end = int(args[3])
                    else:
                        end = line_containing(lines, args[3]) + 1
                except Exception, e:  # If line_containing fails
                    print "Error: {}".format(e)
                    print "  in {}".format(args[1])
                    sys.exit(1)
                # TODO: Also allow regex matching
                lines = lines[start - 1:end]
            # Trim "OMIT" lines. Ignore "*/".
            lines = filter(lambda x: not x.strip().rstrip("*/").rstrip().lower().endswith("omit"), lines)
            # TODO: Trim leading and trailing empty lines
            if args[0] == ".code":
                lines.insert(0, "```go")
                lines.append("```")
            return "\n".join(lines)
        # No matching logic
        else:
            return match.group(0)

    # Process an arbitrary number of expansions (directives may produce
    # further directives), until the text stops changing.
    oldSource = ""
    while source != oldSource:
        oldSource = source
        source = re.sub(r"\[\[(.*)\]\]", expand, oldSource)
    return convert_markdown_original(source)
# Hotpatch in the markdown conversion wrapper
mkdocs.build.convert_markdown = convert_markdown_new

if __name__ == "__main__":
    # Build documentation
    config = load_config(options=None)
    build(config)
    # Load templates
    template_env = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'theme')))
    index_template = template_env.get_template('home.html')
    community_template = template_env.get_template('community.html')
    # Home page
    with open('site/index.html', 'w') as f:
        f.write(index_template.render(
            page="home"
        ))
    # Community page
    with open('site/community.html', 'w') as f:
        f.write(community_template.render(
            page="community"
        ))
| docs/buildsite.py | 3,663 | !/usr/bin/env python Wrap some functions to allow custom commands in markdown Import external markdownTry as a URL invalid URL, try as a file Run a shell command Source code embedsTry as a URL invalid URL, try as a file Short hand for specifying a region If line_containing failsTODO: Also allow regex matching Trim "OMIT" lines. Ignore "*/". TODO: Trim leading and trailing empty lines else: args[0] == ".doc" lines.insert(0, "\n") lines.insert("\n") No matching logic Process an aritrary number of expansions. Hotpatch in the markdown conversion wrapper Build documentation Load templates Home page Community page | 620 | en | 0.660064 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of moses. Its use is licensed under the GNU Lesser General
# Public License version 2.1 or, at your option, any later version.
from __future__ import print_function, unicode_literals
import logging
import argparse
import subprocess
import sys
import os
import codecs
# ../bilingual-lm
sys.path.append(os.path.join(os.path.dirname(sys.path[0]), 'bilingual-lm'))
import train_nplm
import extract_vocab
import extract_syntactic_ngrams
logging.basicConfig(
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument(
"--working-dir", dest="working_dir", metavar="PATH")
parser.add_argument(
"--corpus", '-text', dest="corpus_stem", metavar="PATH", help="Input file.")
parser.add_argument(
"--nplm-home", dest="nplm_home", metavar="PATH", required=True,
help="Location of NPLM.")
parser.add_argument(
"--epochs", dest="epochs", type=int, metavar="INT",
help="Number of training epochs (default: %(default)s).")
parser.add_argument(
"--up-context-size", dest="up_context_size", type=int, metavar="INT",
help="Size of ancestor context (default: %(default)s).")
parser.add_argument(
"--left-context-size", dest="left_context_size", type=int, metavar="INT",
help="Size of sibling context (left) (default: %(default)s).")
parser.add_argument(
"--right-context-size", dest="right_context_size", type=int,
metavar="INT",
help="Size of sibling context (right) (default: %(default)s).")
parser.add_argument(
"--mode", dest="mode", choices=['head', 'label'], required=True,
help="Type of RDLM to train (both are required for decoding).")
parser.add_argument(
"--minibatch-size", dest="minibatch_size", type=int, metavar="INT",
help="Minibatch size (default: %(default)s).")
parser.add_argument(
"--noise", dest="noise", type=int, metavar="INT",
help="Number of noise samples for NCE (default: %(default)s).")
parser.add_argument(
"--hidden", dest="hidden", type=int, metavar="INT",
help=(
"Size of hidden layer (0 for single hidden layer) "
"(default: %(default)s)"))
parser.add_argument(
"--input-embedding", dest="input_embedding", type=int, metavar="INT",
help="Size of input embedding layer (default: %(default)s).")
parser.add_argument(
"--output-embedding", dest="output_embedding", type=int, metavar="INT",
help="Size of output embedding layer (default: %(default)s).")
parser.add_argument(
"--threads", "-t", dest="threads", type=int, metavar="INT",
help="Number of threads (default: %(default)s).")
parser.add_argument(
"--output-model", dest="output_model", metavar="PATH",
help="Name of output model (default: %(default)s).")
parser.add_argument(
"--output-dir", dest="output_dir", metavar="PATH",
help="Output directory (default: same as working-dir).")
parser.add_argument(
"--config-options-file", dest="config_options_file", metavar="PATH")
parser.add_argument(
"--log-file", dest="log_file", metavar="PATH",
help="Log file to write to (default: %(default)s).")
parser.add_argument(
"--validation-corpus", dest="validation_corpus", metavar="PATH",
help="Validation file (default: %(default)s).")
parser.add_argument(
"--activation-function", dest="activation_fn",
choices=['identity', 'rectifier', 'tanh', 'hardtanh'],
help="Activation function (default: %(default)s).")
parser.add_argument(
"--learning-rate", dest="learning_rate", type=float, metavar="FLOAT",
help="Learning rate (default: %(default)s).")
parser.add_argument(
"--input-words-file", dest="input_words_file", metavar="PATH",
help="Input vocabulary (default: %(default)s).")
parser.add_argument(
"--output-words-file", dest="output_words_file", metavar="PATH",
help="Output vocabulary (default: %(default)s).")
parser.add_argument(
"--input-vocab-size", dest="input_vocab_size", type=int, metavar="INT",
help="Input vocabulary size (default: %(default)s).")
parser.add_argument(
"--output-vocab-size", dest="output_vocab_size", type=int, metavar="INT",
help="Output vocabulary size (default: %(default)s).")
parser.add_argument(
"--mmap", dest="mmap", action="store_true",
help="Use memory-mapped file (for lower memory consumption).")
parser.add_argument(
"--train-host", dest="train_host",
help="Execute nplm training on this host, via ssh")
parser.add_argument("--extra-settings", dest="extra_settings",
help="Extra settings to be passed to NPLM")
parser.set_defaults(
working_dir="working",
corpus_stem="train",
nplm_home="/home/bhaddow/tools/nplm",
epochs=2,
up_context_size=2,
left_context_size=3,
right_context_size=0,
minibatch_size=1000,
noise=100,
hidden=0,
mode='head',
input_embedding=150,
output_embedding=750,
threads=4,
output_model="train",
output_dir=None,
config_options_file="config",
log_file="log",
validation_corpus=None,
activation_fn="rectifier",
learning_rate=1,
input_words_file=None,
output_words_file=None,
input_vocab_size=500000,
output_vocab_size=500000)
def prepare_vocabulary(options):
    """Extract input/output vocabularies from the training corpus.

    Runs ``extract_vocab`` over ``options.corpus_stem`` and, for whichever
    of ``options.input_words_file`` / ``options.output_words_file`` is
    unset, writes a vocabulary file truncated to the configured vocabulary
    size and records its path back on ``options``.

    :param options: parsed argparse namespace; mutated in place.
    """
    vocab_prefix = os.path.join(options.working_dir, 'vocab')
    extract_vocab_options = extract_vocab.create_parser().parse_args(
        ['--input', options.corpus_stem, '--output', vocab_prefix])
    extract_vocab.main(extract_vocab_options)

    if options.input_words_file is None:
        options.input_words_file = vocab_prefix + '.input'
        # Input vocabulary: most frequent words overall, truncated.
        # Use context managers so handles are flushed and closed reliably
        # (the original leaked every file handle it opened here).
        with open(vocab_prefix + '.all') as vocab_file:
            filtered_vocab = vocab_file.readlines()
        if options.input_vocab_size:
            filtered_vocab = filtered_vocab[:options.input_vocab_size]
        with open(options.input_words_file, 'w') as out_file:
            out_file.writelines(filtered_vocab)

    if options.output_words_file is None:
        options.output_words_file = vocab_prefix + '.output'
        if options.mode == 'label':
            # The label model predicts labels: keep special symbols and
            # nonterminals, minus virtual symbols that are never predicted.
            blacklist = [
                '<null',
                '<root',
                '<start_head',
                '<dummy',
                '<head_head',
                '<stop_head',
            ]
            with open(vocab_prefix + '.special') as vocab_file:
                filtered_vocab = vocab_file.readlines()
            with open(vocab_prefix + '.nonterminals') as vocab_file:
                filtered_vocab += vocab_file.readlines()
            filtered_vocab = [
                word
                for word in filtered_vocab
                if not any(word.startswith(prefix) for prefix in blacklist)]
            if options.output_vocab_size:
                filtered_vocab = filtered_vocab[:options.output_vocab_size]
        else:
            with open(vocab_prefix + '.all') as vocab_file:
                filtered_vocab = vocab_file.readlines()[
                    :options.output_vocab_size]
        with open(options.output_words_file, 'w') as out_file:
            out_file.writelines(filtered_vocab)
def main(options):
    """Drive the full RDLM training pipeline for one model (head or label).

    Extracts syntactic n-grams, optionally memory-maps the training data,
    trains the NPLM network and post-processes the final model.  Mutates
    ``options`` (ngram_size, vocabulary/validation file paths).
    """
    if options.output_dir is None:
        options.output_dir = options.working_dir
    else:
        # Create output dir if necessary
        if not os.path.exists(options.output_dir):
            os.makedirs(options.output_dir)

    # Total n-gram size: two units per context position (presumably
    # head + label per position -- TODO confirm), plus the predicted units.
    options.ngram_size = (
        2 * options.up_context_size +
        2 * options.left_context_size +
        2 * options.right_context_size
    )
    if options.mode == 'head':
        options.ngram_size += 2
    elif options.mode == 'label':
        options.ngram_size += 1

    if options.input_words_file is None or options.output_words_file is None:
        sys.stderr.write(
            "Either input vocabulary or output vocabulary not specified: "
            "extracting vocabulary from training text.\n")
        prepare_vocabulary(options)

    numberized_file = os.path.basename(options.corpus_stem) + '.numberized'
    train_file = numberized_file
    if options.mmap:
        train_file += '.mmap'

    extract_options = extract_syntactic_ngrams.create_parser().parse_args([
        '--input', options.corpus_stem,
        '--output', os.path.join(options.working_dir, numberized_file),
        '--vocab', options.input_words_file,
        '--output_vocab', options.output_words_file,
        '--right_context', str(options.right_context_size),
        '--left_context', str(options.left_context_size),
        '--up_context', str(options.up_context_size),
        '--mode', options.mode
    ])
    sys.stderr.write('extracting syntactic n-grams\n')
    extract_syntactic_ngrams.main(extract_options)

    if options.validation_corpus:
        # Reuse the same extractor options, swapping in the validation files.
        extract_options.input = open(options.validation_corpus)
        options.validation_file = os.path.join(
            options.working_dir, os.path.basename(options.validation_corpus))
        extract_options.output = open(
            options.validation_file + '.numberized', 'w')
        sys.stderr.write('extracting syntactic n-grams (validation file)\n')
        extract_syntactic_ngrams.main(extract_options)
        extract_options.output.close()
    else:
        options.validation_file = None

    if options.mmap:
        # Remove any stale memory-mapped file from a previous run.
        try:
            os.remove(os.path.join(options.working_dir, train_file))
        except OSError:
            pass
        mmap_cmd = [os.path.join(options.nplm_home, 'src', 'createMmap'),
                    '--input_file',
                    os.path.join(options.working_dir, numberized_file),
                    '--output_file',
                    os.path.join(options.working_dir, train_file)
                    ]
        sys.stderr.write('creating memory-mapped file\n')
        sys.stderr.write('executing: ' + ', '.join(mmap_cmd) + '\n')
        ret = subprocess.call(mmap_cmd)
        if ret:
            raise Exception("creating memory-mapped file failed")

    sys.stderr.write('training neural network\n')
    train_nplm.main(options)

    sys.stderr.write('averaging null words\n')
    # Post-process the final epoch's model with average_null_embedding.py.
    ret = subprocess.call([
        os.path.join(sys.path[0], 'average_null_embedding.py'),
        options.nplm_home,
        os.path.join(
            options.output_dir,
            options.output_model + '.model.nplm.' + str(options.epochs)),
        os.path.join(
            options.working_dir,
            numberized_file),
        os.path.join(options.output_dir, options.output_model + '.model.nplm')
    ])
    if ret:
        raise Exception("averaging null words failed")
if __name__ == "__main__":
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
options = parser.parse_known_args()[0]
if parser.parse_known_args()[1]:
sys.stderr.write('Warning: unknown arguments: {0}\n'.format(parser.parse_known_args()[1]))
main(options)
| examples/mosesdecoder-master/scripts/training/rdlm/train_rdlm.py | 10,825 | !/usr/bin/env python -*- coding: utf-8 -*- This file is part of moses. Its use is licensed under the GNU Lesser General Public License version 2.1 or, at your option, any later version. ../bilingual-lm Create output dir if necessary | 233 | en | 0.814978 |
# qubit number=3
# total number=14
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the benchmark circuit on ``n`` qubits.

    NOTE(review): this function reads the module-level globals ``E``, ``V``,
    ``gamma`` and ``beta``, which are only assigned inside the ``__main__``
    block below -- it must be called after those exist.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[0]) # number=1
    prog.cx(input_qubit[3],input_qubit[0]) # number=11
    prog.z(input_qubit[3]) # number=12
    prog.cx(input_qubit[3],input_qubit[0]) # number=13
    prog.z(input_qubit[1]) # number=8
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4

    # QAOA-style layer: controlled-phase per weighted edge, then a global
    # rx mixer over all vertices.
    for edge in E:
        k = edge[0]
        l = edge[1]
        # NOTE(review): for edges containing node 0 this indexes
        # input_qubit[-1] (the last qubit) -- looks unintended; confirm.
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)

    prog.rx(2 * beta, range(len(V)))

    prog.swap(input_qubit[3],input_qubit[0]) # number=5
    prog.swap(input_qubit[3],input_qubit[0]) # number=6
    prog.x(input_qubit[3]) # number=9
    prog.x(input_qubit[3]) # number=10
    # circuit end
    return prog
if __name__ == '__main__':
    # Benchmark graph: 4 nodes, 5 weighted edges.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]

    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)

    # Grid-search the (gamma, beta) plane for the maximum of the analytic
    # objective F1, then reuse the best angles in make_circuit.
    step_size = 0.1

    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)

    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)

    # NOTE(review): np.where returns (row, col) = (beta index, gamma index)
    # for this meshgrid; assigning gamma from the row index may be swapped
    # -- confirm against the original benchmark generator.
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]

    gamma = a[0] * step_size
    beta = a[1] * step_size

    prog = make_circuit(4)
    sample_shot =5600
    writefile = open("../data/startQiskit_noisy696.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    # Transpile for, and simulate on, the FakeYorktown noisy backend model.
    backend = FakeYorktown()

    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1

    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()

    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
| benchmark/startQiskit_noisy696.py | 2,543 | qubit number=3 total number=14 circuit begin number=1 number=11 number=12 number=13 number=8 number=3 number=4 number=5 number=6 number=9 number=10 circuit end prog.draw('mpl', filename=(kernel + '.png')) | 204 | en | 0.159633 |
"""Provides the MenuItem class."""
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union
from attr import attrs
from ..action import ActionFunctionType
from ..mixins import RegisterEventMixin
if TYPE_CHECKING:
from ..types import TitleFunction
@attrs(auto_attribs=True)
class MenuItem(RegisterEventMixin):
    """A single entry in a :class:`~earwax.menu.Menu`.

    Usually created indirectly, via :meth:`earwax.menu.Menu.add_item` or
    :meth:`earwax.menu.Menu.item`, rather than instantiated by hand.

    :ivar func: Callable invoked when this item is activated.

    :ivar title: The item's title: either a string, or a callable returning
        the string to display.

    :ivar select_sound_path: Optional sound played when this item gains
        focus.  When ``None`` (the default), no sound plays unless the
        containing menu's :attr:`~earwax.Menu.item_select_sound_path` or
        :attr:`earwax.EarwaxConfig.menus.default_item_select_sound` is set.

    :ivar loop_select_sound: Whether the selection sound should loop.

    :ivar activate_sound_path: Optional sound played when this item is
        activated; falls back to the menu's
        :attr:`~earwax.Menu.item_activate_sound_path` or the global
        configuration, as above.
    """

    func: ActionFunctionType
    title: Optional[Union[str, "TitleFunction"]] = None
    select_sound_path: Optional[Path] = None
    loop_select_sound: bool = False
    activate_sound_path: Optional[Path] = None

    def __attrs_post_init__(self) -> None:
        """Register this item's events."""
        self.register_event(self.on_selected)

    def get_title(self) -> Optional[str]:
        """Return the display title, calling ``title`` first if callable."""
        title = self.title
        return title() if callable(title) else title

    def on_selected(self) -> None:
        """Event fired when this item becomes selected; no-op by default."""
        pass
| earwax/menus/menu_item.py | 2,490 | An item in a :class:`~earwax.menu.Menu`.
This class is rarely used directly, instead
:meth:`earwax.menu.Menu.add_item` or :meth:`earwax.menu.Menu.item` can be
used to return an instance.
:ivar ~earwax.MenuItem.func: The function which will be called when this
item is activated.
:ivar ~earwax.MenuItem.title: The title of this menu item.
If this value is a callable, it should return a string which will be
used as the title.
:ivar ~earwax.MenuItem.select_sound_path: The path to a sound which should
play when this menu item is selected.
If this value is ``None`` (the default), then no sound will be heard
unless the containing menu has its
:attr:`~earwax.Menu.item_select_sound_path` attribute set to something
that is not ``None``, or
:attr:`earwax.EarwaxConfig.menus.default_item_select_sound` is not
``None``.
:ivar ~earwax.MenuItem.activate_sound_path: The path to a sound which
should play when this menu item is activated.
If this value is ``None`` (the default), then no sound will be heard
unless the containing menu has its
:attr:`~earwax.Menu.item_activate_sound_path` attribute set to
something that is not ``None``, or
:attr:`earwax.EarwaxConfig.menus.default_item_select_sound` is not
``None``.
Register events.
Return the proper title of this object.
If :attr:`self.title <earwax.mixins.TitleMixin.title>` is a callable,
its return value will be returned.
Handle this menu item being selected.
Provides the MenuItem class. | 1,516 | en | 0.745645 |
# -*- coding: utf-8 -*-
# URL configuration for the login app.
from django.urls import path

# NOTE(review): `path` is imported but unused until routes are added.
app_name = 'login'
urlpatterns = []
from datetime import datetime
import numpy as np
import re
from bamboo.core.frame import RESERVED_KEYS
from bamboo.core.parser import Parser
from bamboo.lib.exceptions import ArgumentError
from bamboo.lib.mongo import reserve_encoded
CARDINALITY = 'cardinality'
OLAP_TYPE = 'olap_type'
SIMPLETYPE = 'simpletype'
LABEL = 'label'
# olap_types
DIMENSION = 'dimension'
MEASURE = 'measure'
# simpletypes
BOOLEAN = 'boolean'
DATETIME = 'datetime'
INTEGER = 'integer'
FLOAT = 'float'
STRING = 'string'
# map from numpy objects to olap_types
DTYPE_TO_OLAP_TYPE = {
np.object_: DIMENSION,
np.bool_: DIMENSION,
np.float64: MEASURE,
np.int64: MEASURE,
datetime: MEASURE,
}
# map from numpy objects to simpletypes
DTYPE_TO_SIMPLETYPE = {
np.bool_: BOOLEAN,
np.float64: FLOAT,
np.int64: INTEGER,
np.object_: STRING,
datetime: DATETIME,
}
SIMPLETYPE_TO_DTYPE = {
FLOAT: np.float64,
INTEGER: np.int64,
}
SIMPLETYPE_TO_OLAP_TYPE = {
v: DTYPE_TO_OLAP_TYPE[k] for (k, v) in DTYPE_TO_SIMPLETYPE.items()}
RE_ENCODED_COLUMN = re.compile(ur'(?u)\W')
class Schema(dict):
    """Dict mapping encoded column names (slugs) to per-column metadata.

    Each value is a dict holding the column's label, simpletype, olap_type
    and, for dimensions, its cardinality.
    """

    @classmethod
    def safe_init(cls, arg):
        """Make schema with potential arg of None."""
        return cls() if arg is None else cls(arg)

    @property
    def labels_to_slugs(self):
        """Build dict from column labels to slugs."""
        return {
            column_attrs[LABEL]: reserve_encoded(column_name) for
            (column_name, column_attrs) in self.items()
        }

    @property
    def numerics(self):
        # Slugs of all integer/float columns.
        return [slug for slug, col_schema in self.items()
                if col_schema[SIMPLETYPE] in [INTEGER, FLOAT]]

    @property
    def numerics_select(self):
        # Mongo-style projection dict selecting all numeric columns.
        return {col: 1 for col in self.numerics}

    def cardinality(self, column):
        # Cardinality is only tracked for dimensions; returns None otherwise.
        if self.is_dimension(column):
            return self[column].get(CARDINALITY)

    def convert_type(self, slug, value):
        """Coerce `value` to the numpy dtype recorded for column `slug`.

        Returns `value` unchanged when the column is unknown or its
        simpletype has no dtype mapping (e.g. strings, booleans, dates).
        """
        column_schema = self.get(slug)
        if column_schema:
            type_func = SIMPLETYPE_TO_DTYPE.get(column_schema[SIMPLETYPE])
            if type_func:
                value = type_func(value)
        return value

    def datetimes(self, intersect_with):
        """Return datetime column slugs that also appear in `intersect_with`."""
        return [slug for slug, col in self.items()
                if col[SIMPLETYPE] == DATETIME and slug in intersect_with]

    def is_date_simpletype(self, column):
        # True when the column's simpletype is datetime.
        return self[column][SIMPLETYPE] == DATETIME

    def is_dimension(self, column):
        # True when the column exists and its OLAP type is 'dimension'.
        col_schema = self.get(column)
        return col_schema and col_schema[OLAP_TYPE] == DIMENSION

    def rebuild(self, dframe, overwrite=False):
        """Rebuild a schema for a dframe.

        :param dframe: The DataFrame whose schema to merge with the current
            schema.
        :param overwrite: If true replace schema, otherwise update.
        """
        current_schema = self
        new_schema = schema_from_dframe(dframe, self)

        if current_schema and not overwrite:
            # merge new schema with existing schema
            current_schema.update(new_schema)
            new_schema = current_schema

        return new_schema

    def rename_map_for_dframe(self, dframe):
        """Return a map from dframe columns to slugs.

        :param dframe: The DataFrame to produce the map for.
        """
        labels_to_slugs = self.labels_to_slugs

        return {
            column: labels_to_slugs[column] for column in
            dframe.columns.tolist() if self._resluggable_column(
                column, labels_to_slugs, dframe)
        }

    def set_olap_type(self, column, olap_type):
        """Set the OLAP Type for this `column` of schema.

        Only columns with an original OLAP Type of 'measure' can be modified.
        This includes columns with Simple Type integer, float, and datetime.

        :param column: The column to set the OLAP Type for.
        :param olap_type: The OLAP Type to set. Must be 'dimension' or
            'measure'.

        :raises: `ArgumentError` if trying to set the OLAP Type of an column
            whose OLAP Type was not originally a 'measure'.
        """
        self[column][OLAP_TYPE] = olap_type

    def _resluggable_column(self, column, labels_to_slugs, dframe):
        """Test if column should be slugged.

        A column should be slugged if:
            1. The `column` is a key in `labels_to_slugs` and
            2. The `column` is not a value in `labels_to_slugs` or
                1. The `column` label is not equal to the `column` slug and
                2. The slug is not in the `dframe`'s columns

        :param column: The column to reslug.
        :param labels_to_slugs: The labels to slugs map (only build once).
        :param dframe: The DataFrame that column is in.
        """
        return (column in labels_to_slugs.keys() and (
            not column in labels_to_slugs.values() or (
                labels_to_slugs[column] != column and
                labels_to_slugs[column] not in dframe.columns)))
def schema_from_dframe(dframe, schema=None):
    """Build schema from the DataFrame and a schema.

    :param dframe: The DataFrame to build a schema for.
    :param schema: Existing schema, optional.

    :returns: A dictionary schema.
    """
    dtypes = dframe.dtypes.to_dict()

    column_names = list()
    names_to_labels = dict()

    # use existing labels for existing columns
    for name in dtypes.keys():
        if name not in RESERVED_KEYS:
            column_names.append(name)
            if schema:
                schema_for_name = schema.get(name)
                if schema_for_name:
                    names_to_labels[name] = schema_for_name[
                        LABEL]

    # Map each original column name to a unique slug.
    encoded_names = dict(zip(column_names, _slugify_columns(column_names)))

    schema = Schema()

    for (name, dtype) in dtypes.items():
        if name not in RESERVED_KEYS:
            column_schema = {
                LABEL: names_to_labels.get(name, name),
                OLAP_TYPE: _olap_type_for_data_and_dtype(
                    dframe[name], dtype),
                SIMPLETYPE: _simpletype_for_data_and_dtype(
                    dframe[name], dtype),
            }

            try:
                # Cardinality is best-effort; unsupported columns skip it.
                column_schema[CARDINALITY] = dframe[
                    name].nunique()
            except AttributeError:
                pass
            except TypeError:
                # E.g. dates with and without offset can not be compared and
                # raise a type error.
                pass
            schema[encoded_names[name]] = column_schema

    return schema
def _slugify_columns(column_names):
    """Convert a list of column labels into unique slugs.

    Non-alphanumeric characters become underscores, and collisions with
    previously produced slugs or parser reserved words are resolved by
    appending underscores.

    :param column_names: A list of strings.

    :returns: Slugs in one-to-one correspondence with `column_names`.
    """
    slugs = []

    for label in column_names:
        candidate = RE_ENCODED_COLUMN.sub('_', label).lower()
        slugs.append(make_unique(candidate, slugs + Parser.reserved_words))

    return slugs
def make_unique(name, reserved_names):
    """Return `name`, suffixed with underscores until it is not reserved.

    :param name: The name to make unique.
    :param reserved_names: Names the result must not collide with.
    """
    candidate = name

    while candidate in reserved_names:
        candidate = candidate + '_'

    return candidate
def filter_schema(schema):
    """Strip non-settable attributes (cardinality) from a schema dict."""
    for column, column_schema in schema.iteritems():
        # Only drop the key when a truthy cardinality is present, matching
        # the original guard (a stored 0/None is left alone).
        if column_schema.get(CARDINALITY):
            del column_schema[CARDINALITY]
            schema[column] = column_schema

    return schema
def _olap_type_for_data_and_dtype(column, dtype):
    """Return the olap_type ('dimension'/'measure') for a column and dtype."""
    return _type_for_data_and_dtypes(
        DTYPE_TO_OLAP_TYPE, column, dtype.type)
def _simpletype_for_data_and_dtype(column, dtype):
    """Return the simpletype (string/float/integer/...) for a column and dtype."""
    return _type_for_data_and_dtypes(
        DTYPE_TO_SIMPLETYPE, column, dtype.type)
def _type_for_data_and_dtypes(type_map, column, dtype_type):
has_datetime = any([isinstance(field, datetime) for field in column])
return type_map[datetime if has_datetime else dtype_type]
| bamboo/lib/schema_builder.py | 8,261 | olap_types simpletypes map from numpy objects to olap_types map from numpy objects to simpletypes merge new schema with existing schema use existing labels for existing columns E.g. dates with and without offset can not be compared and raise a type error. | 255 | en | 0.58207 |
import numpy as np
import theano.tensor as tt
from pymc3.util import get_variable_name
from ..math import logsumexp
from .dist_math import bound
from .distribution import Discrete, Distribution, draw_values, generate_samples
from .continuous import get_tau_sd, Normal
def all_discrete(comp_dists):
    """
    Determine if all distributions in comp_dists are discrete
    """
    # A single Distribution instance is treated as one component; otherwise
    # comp_dists is an iterable of component distributions.
    if isinstance(comp_dists, Distribution):
        return isinstance(comp_dists, Discrete)
    return all(isinstance(dist, Discrete) for dist in comp_dists)
class Mixture(Distribution):
    R"""
    Mixture log-likelihood

    Often used to model subpopulation heterogeneity

    .. math:: f(x \mid w, \theta) = \sum_{i = 1}^n w_i f_i(x \mid \theta_i)

    ========  ============================================
    Support   :math:`\cap_{i = 1}^n \textrm{support}(f_i)`
    Mean      :math:`\sum_{i = 1}^n w_i \mu_i`
    ========  ============================================

    Parameters
    ----------
    w : array of floats
        w >= 0 and w <= 1
        the mixture weights
    comp_dists : multidimensional PyMC3 distribution (e.g. `pm.Poisson.dist(...)`)
        or iterable of one-dimensional PyMC3 distributions the
        component distributions :math:`f_1, \ldots, f_n`

    Example
    -------
    .. code-block:: python

        # 2-Mixture Poisson distribution
        with pm.Model() as model:
            lam = pm.Exponential('lam', lam=1, shape=(2,))  # `shape=(2,)` indicates two mixtures.

            # As we just need the logp, rather than add a RV to the model, we need to call .dist()
            components = pm.Poisson.dist(mu=lam, shape=(2,))

            w = pm.Dirichlet('w', a=np.array([1, 1]))  # two mixture component weights.

            like = pm.Mixture('like', w=w, comp_dists=components, observed=data)

        # 2-Mixture Poisson using iterable of distributions.
        with pm.Model() as model:
            lam1 = pm.Exponential('lam1', lam=1)
            lam2 = pm.Exponential('lam2', lam=1)

            pois1 = pm.Poisson.dist(mu=lam1)
            pois2 = pm.Poisson.dist(mu=lam2)

            w = pm.Dirichlet('w', a=np.array([1, 1]))

            like = pm.Mixture('like', w=w, comp_dists = [pois1, pois2], observed=data)
    """

    def __init__(self, w, comp_dists, *args, **kwargs):
        shape = kwargs.pop('shape', ())

        self.w = w = tt.as_tensor_variable(w)
        self.comp_dists = comp_dists

        defaults = kwargs.pop('defaults', [])

        # The mixture is discrete only when every component is discrete.
        if all_discrete(comp_dists):
            dtype = kwargs.pop('dtype', 'int64')
        else:
            dtype = kwargs.pop('dtype', 'float64')

        # mean/mode are best-effort: components without the attribute
        # simply don't contribute a default (AttributeError is swallowed).
        try:
            self.mean = (w * self._comp_means()).sum(axis=-1)

            if 'mean' not in defaults:
                defaults.append('mean')
        except AttributeError:
            pass

        try:
            comp_modes = self._comp_modes()
            comp_mode_logps = self.logp(comp_modes)
            self.mode = comp_modes[tt.argmax(w * comp_mode_logps, axis=-1)]

            if 'mode' not in defaults:
                defaults.append('mode')
        except AttributeError:
            pass

        super(Mixture, self).__init__(shape, dtype, defaults=defaults,
                                      *args, **kwargs)

    def _comp_logp(self, value):
        """Per-component log-probabilities of `value` (last axis: component)."""
        comp_dists = self.comp_dists

        try:
            # Single multidimensional distribution: pad so `value`
            # broadcasts against the component dimension.
            value_ = value if value.ndim > 1 else tt.shape_padright(value)

            return comp_dists.logp(value_)
        except AttributeError:
            # Iterable of one-dimensional distributions.
            return tt.stack([comp_dist.logp(value) for comp_dist in comp_dists],
                            axis=1)

    def _comp_means(self):
        # Component means, stacked along the last axis.
        try:
            return tt.as_tensor_variable(self.comp_dists.mean)
        except AttributeError:
            return tt.stack([comp_dist.mean for comp_dist in self.comp_dists],
                            axis=1)

    def _comp_modes(self):
        # Component modes, stacked along the last axis.
        try:
            return tt.as_tensor_variable(self.comp_dists.mode)
        except AttributeError:
            return tt.stack([comp_dist.mode for comp_dist in self.comp_dists],
                            axis=1)

    def _comp_samples(self, point=None, size=None, repeat=None):
        # Draw one sample per component; columns correspond to components.
        try:
            samples = self.comp_dists.random(point=point, size=size, repeat=repeat)
        except AttributeError:
            samples = np.column_stack([comp_dist.random(point=point, size=size, repeat=repeat)
                                       for comp_dist in self.comp_dists])

        return np.squeeze(samples)

    def logp(self, value):
        """Mixture log-likelihood: logsumexp over weighted component logps."""
        w = self.w

        return bound(logsumexp(tt.log(w) + self._comp_logp(value), axis=-1).sum(),
                     w >= 0, w <= 1, tt.allclose(w.sum(axis=-1), 1),
                     broadcast_conditions=False)

    def random(self, point=None, size=None, repeat=None):
        """Draw random samples: pick a component per draw, then sample it."""
        def random_choice(*args, **kwargs):
            w = kwargs.pop('w')
            # Normalise weights defensively before sampling indices.
            w /= w.sum(axis=-1, keepdims=True)
            k = w.shape[-1]

            if w.ndim > 1:
                return np.row_stack([np.random.choice(k, p=w_) for w_ in w])
            else:
                return np.random.choice(k, p=w, *args, **kwargs)

        w = draw_values([self.w], point=point)[0]

        w_samples = generate_samples(random_choice,
                                     w=w,
                                     broadcast_shape=w.shape[:-1] or (1,),
                                     dist_shape=self.shape,
                                     size=size).squeeze()
        comp_samples = self._comp_samples(point=point, size=size, repeat=repeat)

        # Select each draw's sample from its chosen component column.
        if comp_samples.ndim > 1:
            return np.squeeze(comp_samples[np.arange(w_samples.size), w_samples])
        else:
            return np.squeeze(comp_samples[w_samples])
class NormalMixture(Mixture):
    R"""
    Normal mixture log-likelihood

    .. math::

        f(x \mid w, \mu, \sigma^2) = \sum_{i = 1}^n w_i N(x \mid \mu_i, \sigma^2_i)

    ========  =======================================
    Support   :math:`x \in \mathbb{R}`
    Mean      :math:`\sum_{i = 1}^n w_i \mu_i`
    Variance  :math:`\sum_{i = 1}^n w_i^2 \sigma^2_i`
    ========  =======================================

    Parameters
    ----------
    w : array of floats
        w >= 0 and w <= 1
        the mixture weights
    mu : array of floats
        the component means
    sd : array of floats
        the component standard deviations
    tau : array of floats
        the component precisions

    Note: You only have to pass in sd or tau, but not both.
    """

    def __init__(self, w, mu, *args, **kwargs):
        # Accept either sd or tau; get_tau_sd derives the missing one.
        _, sd = get_tau_sd(tau=kwargs.pop('tau', None),
                           sd=kwargs.pop('sd', None))

        distshape = np.broadcast(mu, sd).shape
        self.mu = mu = tt.as_tensor_variable(mu)
        self.sd = sd = tt.as_tensor_variable(sd)

        # Scalar broadcast: fall back to the theano test values for a shape.
        if not distshape:
            distshape = np.broadcast(mu.tag.test_value, sd.tag.test_value).shape

        super(NormalMixture, self).__init__(w, Normal.dist(mu, sd=sd, shape=distshape),
                                            *args, **kwargs)

    def _repr_latex_(self, name=None, dist=None):
        """LaTeX representation used by Jupyter notebooks."""
        if dist is None:
            dist = self
        mu = dist.mu
        w = dist.w
        sd = dist.sd
        name = r'\text{%s}' % name
        return r'${} \sim \text{{NormalMixture}}(\mathit{{w}}={},~\mathit{{mu}}={},~\mathit{{sigma}}={})$'.format(name,
                                                get_variable_name(w),
                                                get_variable_name(mu),
                                                get_variable_name(sd))
| pymc3/distributions/mixture.py | 7,725 | Mixture log-likelihood
Often used to model subpopulation heterogeneity
.. math:: f(x \mid w, \theta) = \sum_{i = 1}^n w_i f_i(x \mid \theta_i)
======== ============================================
Support :math:`\cap_{i = 1}^n \textrm{support}(f_i)`
Mean :math:`\sum_{i = 1}^n w_i \mu_i`
======== ============================================
Parameters
----------
w : array of floats
w >= 0 and w <= 1
the mixture weights
comp_dists : multidimensional PyMC3 distribution (e.g. `pm.Poisson.dist(...)`)
or iterable of one-dimensional PyMC3 distributions the
component distributions :math:`f_1, \ldots, f_n`
Example
-------
.. code-block:: python
# 2-Mixture Poisson distribution
with pm.Model() as model:
lam = pm.Exponential('lam', lam=1, shape=(2,)) # `shape=(2,)` indicates two mixtures.
# As we just need the logp, rather than add a RV to the model, we need to call .dist()
components = pm.Poisson.dist(mu=lam, shape=(2,))
w = pm.Dirichlet('w', a=np.array([1, 1])) # two mixture component weights.
like = pm.Mixture('like', w=w, comp_dists=components, observed=data)
# 2-Mixture Poisson using iterable of distributions.
with pm.Model() as model:
lam1 = pm.Exponential('lam1', lam=1)
lam2 = pm.Exponential('lam2', lam=1)
pois1 = pm.Poisson.dist(mu=lam1)
pois2 = pm.Poisson.dist(mu=lam2)
w = pm.Dirichlet('w', a=np.array([1, 1]))
like = pm.Mixture('like', w=w, comp_dists = [pois1, pois2], observed=data)
Normal mixture log-likelihood
.. math::
f(x \mid w, \mu, \sigma^2) = \sum_{i = 1}^n w_i N(x \mid \mu_i, \sigma^2_i)
======== =======================================
Support :math:`x \in \mathbb{R}`
Mean :math:`\sum_{i = 1}^n w_i \mu_i`
Variance :math:`\sum_{i = 1}^n w_i^2 \sigma^2_i`
======== =======================================
Parameters
----------
w : array of floats
w >= 0 and w <= 1
the mixture weights
mu : array of floats
the component means
sd : array of floats
the component standard deviations
tau : array of floats
the component precisions
Note: You only have to pass in sd or tau, but not both.
Determine if all distributions in comp_dists are discrete | 2,260 | en | 0.591713 |
'''
Class containing a Massey-style model and rankings of a season.
todo: documentation
todo: type hints
todo: inherit from Model?
'''
class Massey:
    '''
    Massey-style rating model for a season.

    All methods are currently unimplemented stubs; the public interface is
    kept stable so callers can be written against it now.
    '''
    def __init__(self):
        '''
        todo: decide which fields the model needs (e.g. the Massey matrix,
        the list of teams, and the fitted ratings).
        '''
        pass
    def rank(self) -> 'List[Team]':
        '''
        Given a matrix, create a power ranking of the teams.

        Returns:
            The teams ordered strongest to weakest (stub: returns None).
        '''
        pass
    def predict_bracket(self) -> 'Bracket':
        '''
        Given a ranking of the teams, and the draw for the bracket, predict
        the winner of every matchup.

        Returns:
            The predicted bracket (stub: returns None).
        '''
        pass
    @staticmethod
    def from_file(filename: str) -> 'Massey':
        '''
        Build a Massey model from a data file.

        The return annotation is quoted because ``Massey`` is not yet bound
        while the class body executes; an unquoted annotation raises
        NameError at import time (absent ``from __future__ import annotations``).

        todo: weighting param?
        Steps: parse teams and games from the file, then create the
        Massey matrix from them.
        '''
        pass
todo: docs
todo: weighting param?
parse teams and games from file
create matrix from teams and games
Given a ranking of the teams, and the draw for the bracket, predict who wins and stuff
Given a matrix, create a power ranking of the teams
Class containing a Massey-style model and rankings of a season.
todo: documentation
todo: type hints
todo: inherit from Model? | 406 | en | 0.833604 |
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------------
# directory.py - parse directory information
# -----------------------------------------------------------------------------
# $Id$
#
# -----------------------------------------------------------------------------
# kaa-Metadata - Media Metadata for Python
# Copyright (C) 2003-2006 Thomas Schueppel, Dirk Meyer
#
# First Edition: Dirk Meyer <dischi@freevo.org>
# Maintainer: Dirk Meyer <dischi@freevo.org>
#
# Please see the file AUTHORS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------------
__all__ = ['Parser']
# python imports
import os
import logging
# kaa imports
import kaa
# kaa.metadata imports
import kaa.metadata.core as core
from kaa.metadata.image.core import BinsParser
# get logging object
log = logging.getLogger('metadata')
class Directory(core.Media):
    """
    Parser for directory-level metadata.

    Reads, in this order: a KDE-style ``.directory`` file (Icon/Name/Comment
    keys), a bins ``album.xml`` file, and a Windows-style ``folder.jpg``
    cover. Later sources overwrite the ``image`` attribute set by earlier ones.
    """
    media = core.MEDIA_DIRECTORY

    def __init__(self, directory):
        core.Media.__init__(self)

        # Parse the KDE ".directory" file, if present.
        # "with" guarantees the handle is closed even if a line raises
        # (the previous open()/close() pair leaked on exceptions).
        info = os.path.join(directory, '.directory')
        if os.path.isfile(info):
            with open(info) as f:
                for line in f:
                    if line.startswith('Icon='):
                        image = line[5:].strip()
                        # Resolve relative icon paths against the directory.
                        if not image.startswith('/'):
                            image = os.path.join(directory, image)
                        if os.path.isfile(image):
                            self._set('image', image)
                    elif line.startswith('Name='):
                        self.title = line[5:].strip()
                    elif line.startswith('Comment='):
                        self.comment = line[8:].strip()

        # Parse the bins "album.xml" file, if present.
        binsxml = os.path.join(directory, 'album.xml')
        if os.path.isfile(binsxml):
            bins = BinsParser(binsxml)
            for key, value in bins.items():
                if key == 'sampleimage':
                    image = os.path.join(directory, kaa.unicode_to_str(value))
                    if os.path.isfile(image):
                        self._set('image', image)
                    continue
                self._set(key, value)

        # A "folder.jpg" (Windows-style cover) wins over earlier images.
        folderjpg = os.path.join(directory, 'folder.jpg')
        if os.path.isfile(folderjpg):
            self._set('image', folderjpg)

        self.mime = 'text/directory'
Parser = Directory
| external/metadata/misc/directory.py | 3,236 | Simple parser for reading a .directory file.
-*- coding: iso-8859-1 -*- ----------------------------------------------------------------------------- directory.py - parse directory information ----------------------------------------------------------------------------- $Id$ ----------------------------------------------------------------------------- kaa-Metadata - Media Metadata for Python Copyright (C) 2003-2006 Thomas Schueppel, Dirk Meyer First Edition: Dirk Meyer <dischi@freevo.org> Maintainer: Dirk Meyer <dischi@freevo.org> Please see the file AUTHORS for a complete list of authors. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MER- CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ----------------------------------------------------------------------------- python imports kaa imports kaa.metadata imports get logging object search .directory search album.xml (bins) find folder.jpg (windows style cover) | 1,499 | en | 0.625666 |
from Transform.Transform import *
# Connection settings for the q server (names suggest kdb+/q -- verify against Transform.connect).
qhost = '10.0.0.10'
qport = 5100
# S3 bucket holding the raw data; index.txt in the bucket lists the tickers to process.
bucket_name = 's3a://insighttmpbucket1/'
index_name = bucket_name + 'index.txt'
tickers = get_stock_list(index_name)
# Open the q, Flint, and Spark connections in one call.
q_con, flint_con, spark_con = connect(qhost, qport)
# One-time raw-table load -- intentionally disabled; re-enable for a full reload.
#push_raw_table(q_con, spark_con, flint_con, bucket_name, tickers)
# Compute and push the returns tables for every ticker.
push_returns(q_con, spark_con, flint_con, bucket_name, tickers)
| src/main.py | 370 | push_raw_table(q_con, spark_con, flint_con, bucket_name, tickers) | 65 | en | 0.426143 |
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by David J Turner (david.turner@sussex.ac.uk) 21/01/2021, 11:45. Copyright (c) David J Turner
from .fit import single_temp_apec, power_law, single_temp_apec_profile
from .run import execute_cmd, xspec_call
| xga/xspec/__init__.py | 338 | This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS). Last modified by David J Turner (david.turner@sussex.ac.uk) 21/01/2021, 11:45. Copyright (c) David J Turner | 216 | en | 0.735622 |
for i in range(1, int(input()) + 1):  # Keep to 2 lines: more than 2 lines scores 0. No blank lines either
    print((10 ** i // 9) ** 2)  # 10**i // 9 is the length-i repunit (1, 11, 111, ...); its square is the palindromic row
| Python/Math/triangle_quest_2.py | 160 | More than 2 lines will result in 0 score. Do not leave a blank line also | 72 | en | 0.945748 |
"""trefechanwen URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from . import views as views
from rest_framework import routers
from bookings import views as bookings_views
# DRF router exposing the availability-dates API at /availabilitydates/.
router = routers.DefaultRouter()
# NOTE(review): `base_name` was renamed to `basename` in DRF 3.9+ -- confirm the pinned DRF version before upgrading.
router.register(r'availabilitydates', bookings_views.AvailabilityDateViewSet, base_name='AvailabilityDates')
# Static site pages, the Django admin, and the REST API endpoints.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^cottage$', views.cottage, name='cottage'),
    url(r'^barn$', views.barn, name='barn'),
    url(r'^availability$', views.availability, name='availability'),
    url(r'^localinfo$', views.localinfo, name='localinfo'),
    url(r'^location$', views.location, name='location'),
    url(r'^walking$', views.walking, name='walking'),
    url(r'^beaches$', views.beaches, name='beaches'),
    # NOTE(review): no trailing '$' here, so this also matches e.g. /wildlife2 -- confirm intended.
    url(r'^wildlife', views.wildlife, name='wildlife'),
    url(r'^contact$', views.contact, name='contact'),
    url(r'^covid$', views.covid, name='covid'),
    url(r'^admin/', admin.site.urls),
    # Router-generated API URLs, then the browsable-API login/logout views.
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| trefechanwen/urls.py | 1,745 | trefechanwen URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) | 638 | en | 0.657507 |
#!/usr/bin/env python
# coding: utf-8
# # Exploring JHU COVID Case, Death, and Vaccine Information
# This notebook takes the live, updated data from JHU CSSE and GovEx, formats and simplifies it for my purposes, and saves it in csv files in the same directory. The two data sources use slightly different conventions and provide data for slightly different locations, so I standardized column names and kept only those rows common to both datasets. It makes most sense for this to be run once, so that the same data is used every time. In the future, it could be worthwhile to make the processes in this project run on 'live' data, but not for the purposes of this project at this time.
#
# #### Data Sources
# * [Case Data - JHU CSSE](https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv)
# * [Vaccine Data - JHU GovEx](https://raw.githubusercontent.com/govex/COVID-19/master/data_tables/vaccine_data/global_data/time_series_covid19_vaccine_doses_admin_global.csv)
#
# #### Technical Sources
# * [Pandas Documentation](https://pandas.pydata.org/docs/)
# * [MatPlotLib.PyPlot Documentation](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html)
# * [Standardizing Dates with `datetime.datetime` - Stack Overflow](https://stackoverflow.com/questions/4709652/python-regex-to-match-dates)
# * [Getting Only Date in `datetime.datetime`](https://stackoverflow.com/questions/18039680/django-get-only-date-from-datetime-strptime)
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
import seaborn as sns
import sys
# ## Case Info
# In[2]:
case_data = pd.read_csv('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv')
print(case_data.shape)
case_data.head()
#
# In[3]:
plt.scatter(case_data['3/23/20'], case_data['3/23/21'])
plt.xlim([0, 2000])
plt.ylim([0, 10000])
plt.title('Relations in COVID Case Count Over One Year in Different Countries')
plt.xlabel('Cases on 3/23/2020')
plt.ylabel('Cases on 3/23/2021')
plt.plot(range(2000))
# The above plot is pretty useless in terms of correlation since we know (logically) that total case numbers can only increase. However, it provides a good example of the extremity of difference in scale of typical case numbers (within the range plotted) between early 2020 and early 2021. I also used it just to make sure there wouldn't be any obvious weird things with the data.
#
# The below table indicates mean case count for each day listed. The drastic change is obvious.
# In[4]:
case_data.mean(numeric_only=True)
# ## Vaccine Info
# In[5]:
vaccine_data = pd.read_csv('https://raw.githubusercontent.com/govex/COVID-19/master/data_tables/vaccine_data/global_data/time_series_covid19_vaccine_doses_admin_global.csv')
print(vaccine_data.shape)
vaccine_data.head()
# ## Standardizing Case and Vaccine Info
# The first step is to standardize columns by deleting unnecessary ones and establishing common naming conventions between the two files to minimize mistakes when referring to them:
# In[6]:
# Rename geographic columns in vaccine data to standardize
rename_conventions = {'Province_State': 'Province/State', 'Country_Region': 'Country', 'Country/Region': 'Country'}
case_data.rename(columns=rename_conventions, inplace=True)
vaccine_data.rename(columns=rename_conventions, inplace=True)
# Standardize dates
import datetime
def date_fixer(old_date):
    """Rename a dataframe column: date columns are normalised and tagged.

    Vaccine columns arrive as 'YYYY-MM-DD' and become 'M/D/YYYY Vaccinations';
    case columns arrive as '(M)M/(D)D/YY' and become 'M/D/YYYY Cases'.
    Non-date column names (e.g. 'Country') are returned unchanged.
    """
    if len(old_date) == 10 and old_date[4] == '-':  # vaccine format YYYY-MM-DD
        date = datetime.datetime.strptime(old_date, '%Y-%m-%d').date()
        data_type = 'Vaccinations'
    # Fixed precedence bug: the original `a and b or c` parsed as `(a and b) or c`,
    # so any name with '/' at index 1 skipped the length check -- short non-date
    # names could crash strptime, and length-1 names raised IndexError.
    elif len(old_date) >= 6 and (old_date[2] == '/' or old_date[1] == '/'):  # case format (M)M/(D)D/YY
        date = datetime.datetime.strptime(old_date, '%m/%d/%y').date()
        data_type = 'Cases'
    else:
        # Not a date column: leave the name untouched.
        return old_date
    return '{}/{}/{} {}'.format(date.month, date.day, date.year, data_type)
vaccine_data.rename(columns=date_fixer, inplace=True)
case_data.rename(columns=date_fixer, inplace=True)
# Next, I deleted the columns that weren't dates or Country/Region and State/Province. I may later want to use population, but not yet.
# In[7]:
case_data.drop(columns=['Lat', 'Long', 'Province/State'], inplace=True)
vaccine_data.drop(columns=['UID', 'iso2', 'iso3', 'code3', 'FIPS', 'Admin2', 'Lat', 'Long_', 'Combined_Key', 'Population', 'Province/State'], inplace=True)
# Next, I sorted the data, filled in null values with 0, combined rows from the same country, and merged the dataframes.
# In[8]:
# Sort both frames by country so rows line up for inspection.
case_data.sort_values(by='Country', inplace=True)
vaccine_data.sort_values(by='Country', inplace=True)
# Missing entries mean "no data reported": treat as zero (float for doses, int for cases).
vaccine_data.fillna(0.0, inplace=True)
case_data.fillna(0, inplace=True)
# Collapse multiple rows per country (one per province) into national totals.
case_data = case_data.groupby(['Country']).sum()
vaccine_data = vaccine_data.groupby(['Country']).sum()
# Persist the cleaned per-source tables before merging.
case_data.to_csv('case-data.csv')
vaccine_data.to_csv('vaccine-data.csv')
# Inner join keeps only countries present in BOTH datasets.
full_data = pd.merge(case_data, vaccine_data, how='inner', on='Country')
print('case data size:', case_data.shape, 'vaccine data size:', vaccine_data.shape, 'full data size:', full_data.shape)
# The next step was to look at all the country names, so I can manually see if I want to get rid of any. I decided to keep them all, at least for now.
# In[9]:
pd.set_option('display.max_seq_items', None)
full_data.index
# Finally, I saved the data into a csv file which can be referenced later. The below cell should really be run once only, so that the same data is used each time. One way to update this project could be to reload the data automatically.
# In[10]:
full_data.to_csv('full-data.csv')
| .scripts/dataprep.py | 5,839 | !/usr/bin/env python coding: utf-8 Exploring JHU COVID Case, Death, and Vaccine Information This notebook takes the live, updated data from JHU CSSE and GovEx, formats and simplifies it for my purposes, and saves it in csv files in the same directory. The two data sources use slightly different conventions and provide data for slightly different locations, so I standardized column names and kept only those rows common to both datasets. It makes most sense for this to be run once, so that the same data is used every time. In the future, it could be worthwhile to make the processes in this project run on 'live' data, but not for the purposes of this project at this time. Data Sources * [Case Data - JHU CSSE](https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv) * [Vaccine Data - JHU GovEx](https://raw.githubusercontent.com/govex/COVID-19/master/data_tables/vaccine_data/global_data/time_series_covid19_vaccine_doses_admin_global.csv) Technical Sources * [Pandas Documentation](https://pandas.pydata.org/docs/) * [MatPlotLib.PyPlot Documentation](https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.html) * [Standardizing Dates with `datetime.datetime` - Stack Overflow](https://stackoverflow.com/questions/4709652/python-regex-to-match-dates) * [Getting Only Date in `datetime.datetime`](https://stackoverflow.com/questions/18039680/django-get-only-date-from-datetime-strptime) In[1]: Case Info In[2]: In[3]: The above plot is pretty useless in terms of correlation since we know (logically) that total case numbers can only increase. However, it provides a good example of the extremity of difference in scale of typical case numbers (within the range plotted) between early 2020 and early 2021. I also used it just to make sure there wouldn't be any obvious weird things with the data. The below table indicates mean case count for each day listed. 
The drastic change is obvious. In[4]: Vaccine Info In[5]: Standardizing Case and Vaccine Info The first step is to standardize columns by deleting unnecessary ones and establishing common naming conventions between the two files to minimize mistakes when referring to them: In[6]: Rename geographic columns in vaccine data to standardize Standardize dates is of format YYYY-MM-DD is of format (M)M/(D)D/YY Next, I deleted the columns that weren't dates or Country/Region and State/Province. I may later want to use population, but not yet. In[7]: Next, I sorted the data, filled in null values with 0, combined rows from the same country, and merged the dataframes. In[8]: The next step was to look at all the country names, so I can manually see if I want to get rid of any. I decided to keep them all, at least for now. In[9]: Finally, I saved the data into a csv file which can be referenced later. The below cell should really be run once only, so that the same data is used each time. One way to update this project could be to reload the data automatically. In[10]: | 3,049 | en | 0.839176 |
from typing import Tuple
import math
import torch
from torch.optim.optimizer import Optimizer
def linear_warmup_and_cosine_protocol(
        f_values: Tuple[float, float, float],
        x_milestones: Tuple[int, int, int, int]):
    """
    Build a piecewise schedule function of the step with 5 regions:
    1. constant at f0 for step <= x0
    2. linear increase from f0 to f1 for x0 < step <= x1
    3. constant at f1 for x1 < step <= x2
    4. cosine protocol from f1 to f2 for x2 < step <= x3
    5. constant at f2 for step > x3
    For a linear ramp followed directly by a cosine decay, set x0=0 (drops
    the first plateau), x2=x1 (drops the second plateau), and max_epochs=x3
    (stops right after the decay).
    """
    x0, x1, x2, x3 = x_milestones
    assert x0 <= x1 <= x2 <= x3

    def fn(step):
        # Region 1: initial plateau at f0.
        if step <= x0:
            return float(f_values[0])
        # Region 2: linear warm-up from f0 to f1.
        if step <= x1:
            slope = float(f_values[1] - f_values[0]) / float(max(1, x1 - x0))
            return float(f_values[0]) + slope * float(step - x0)
        # Region 3: plateau at the warmed-up value f1.
        if step <= x2:
            return float(f_values[1])
        # Region 4: half-cosine decay from f1 down to f2.
        if step <= x3:
            progress = float(step - x2) / float(max(1, x3 - x2))  # in (0,1)
            weight = 0.5 * (1.0 + math.cos(math.pi * progress))   # in (1,0)
            return float(f_values[2]) + weight * float(f_values[1] - f_values[2])
        # Region 5: final plateau at f2.
        return float(f_values[2])

    return fn
class LARS(Optimizer):
    r"""
    Extends SGD in PyTorch with LARS scaling from the paper
    `Large batch training of Convolutional Networks <https://arxiv.org/pdf/1708.03888.pdf>`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float): learning rate
        momentum (float, optional): momentum factor (default: 0)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        dampening (float, optional): dampening for momentum (default: 0)
        nesterov (bool, optional): enables Nesterov momentum (default: False)
        trust_coefficient (float, optional): trust coefficient for computing LR (default: 0.001)
        eps (float, optional): eps for division denominator (default: 1e-8)

    Example:
        >>> model = torch.nn.Linear(10, 1)
        >>> input = torch.Tensor(10)
        >>> target = torch.Tensor([1.])
        >>> loss_fn = lambda input, target: (input - target) ** 2
        >>> #
        >>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    Note:
        The application of momentum in the SGD part is modified according to
        the PyTorch standards. LARS scaling fits into the equation in the
        following fashion.

        .. math::

            \begin{aligned}
                g_{t+1} & = \text{lars\_lr} * (\beta * p_{t} + g_{t+1}), \\
                v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
                p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
            \end{aligned}

        where :math:`p`, :math:`g`, :math:`v`, :math:`\mu` and :math:`\beta` denote the
        parameters, gradient, velocity, momentum, and weight decay respectively.
        The :math:`lars\_lr` is defined by Eq. 6 in the paper.
        The Nesterov version is analogously modified.

        (The docstring is now a raw string: the original non-raw version turned
        ``\b`` and ``\t`` in the LaTeX into backspace/tab control characters.)

    .. warning::
        Parameters with weight decay set to 0 will automatically be excluded from
        layer-wise LR scaling. This is to ensure consistency with papers like SimCLR
        and BYOL.
    """
    def __init__(
        self,
        params,
        lr=None,
        momentum=0,
        dampening=0,
        weight_decay=0,
        nesterov=False,
        trust_coefficient=0.001,
        eps=1e-8,
    ):
        """Validate hyper-parameters and build the per-group defaults dict.

        Raises:
            ValueError: if lr is missing/negative, momentum or weight_decay is
                negative, or Nesterov is requested without momentum / with
                non-zero dampening.
        """
        if lr is None or lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr}")
        if momentum < 0.0:
            raise ValueError(f"Invalid momentum value: {momentum}")
        if weight_decay < 0.0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        defaults = dict(
            lr=lr,
            momentum=momentum,
            dampening=dampening,
            weight_decay=weight_decay,
            nesterov=nesterov,
            trust_coefficient=trust_coefficient,
            eps=eps,
        )
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov momentum requires a momentum and zero dampening")
        super().__init__(params, defaults)

    def __setstate__(self, state):
        """Restore from a pickle; backfill 'nesterov' for states saved before it existed."""
        super().__setstate__(state)
        for group in self.param_groups:
            group.setdefault("nesterov", False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            # Re-enable autograd inside the no_grad context so the closure
            # can recompute gradients.
            with torch.enable_grad():
                loss = closure()

        # exclude scaling for params with 0 weight decay
        for group in self.param_groups:
            weight_decay = group["weight_decay"]
            momentum = group["momentum"]
            dampening = group["dampening"]
            nesterov = group["nesterov"]

            for p in group["params"]:
                if p.grad is None:
                    continue

                d_p = p.grad
                p_norm = torch.norm(p.data)
                g_norm = torch.norm(p.grad.data)

                # lars scaling + weight decay part
                # (zero norms are skipped to avoid division by ~0)
                if weight_decay != 0:
                    if p_norm != 0 and g_norm != 0:
                        lars_lr = p_norm / (g_norm + p_norm * weight_decay + group["eps"])
                        lars_lr *= group["trust_coefficient"]

                        d_p = d_p.add(p, alpha=weight_decay)
                        d_p *= lars_lr

                # sgd part
                # (momentum buffer persists across steps in self.state[p])
                if momentum != 0:
                    param_state = self.state[p]
                    if "momentum_buffer" not in param_state:
                        buf = param_state["momentum_buffer"] = torch.clone(d_p).detach()
                    else:
                        buf = param_state["momentum_buffer"]
                    buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
                    if nesterov:
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf

                p.add_(d_p, alpha=-group["lr"])

        return loss
| src/tissue_purifier/models/_optim_scheduler.py | 6,863 | Extends SGD in PyTorch with LARS scaling from the paper
'Large batch training of Convolutional Networks <https://arxiv.org/pdf/1708.03888.pdf>'_.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
trust_coefficient (float, optional): trust coefficient for computing LR (default: 0.001)
eps (float, optional): eps for division denominator (default: 1e-8)
Example:
>>> model = torch.nn.Linear(10, 1)
>>> input = torch.Tensor(10)
>>> target = torch.Tensor([1.])
>>> loss_fn = lambda input, target: (input - target) ** 2
>>> #
>>> optimizer = LARS(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
Note:
The application of momentum in the SGD part is modified according to
the PyTorch standards. LARS scaling fits into the equation in the
following fashion.
.. math::
egin{aligned}
g_{t+1} & = ext{lars_lr} * (eta * p_{t} + g_{t+1}), \
v_{t+1} & = \mu * v_{t} + g_{t+1}, \
p_{t+1} & = p_{t} - ext{lr} * v_{t+1},
\end{aligned}
where :math:`p`, :math:`g`, :math:`v`, :math:`\mu` and :math:`eta` denote the
parameters, gradient, velocity, momentum, and weight decay respectively.
The :math:`lars_lr` is defined by Eq. 6 in the paper.
The Nesterov version is analogously modified.
.. warning::
Parameters with weight decay set to 0 will automatically be excluded from
layer-wise LR scaling. This is to ensure consistency with papers like SimCLR
and BYOL.
There are 5 regions:
1. constant at f0 for x < x0
2. linear increase from f0 to f1 for x0 < x < x1
3. constant at f1 for x1 < x < x2
4. cosine protocol from f1 to f2 for x2 < x < x3
5. constant at f2 for x > x3
If you want a linear_ramp followed by a cosine_decay only simply set:
1. x0=0 (to eliminate the first constant piece)
2. x2=x1 (to eliminate the second constant piece)
3. max_epochs=x3 (to make the simulation stop after the linear or cosine decay)
Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
in (0,1) in (1,0) exclude scaling for params with 0 weight decay lars scaling + weight decay part sgd part | 2,662 | en | 0.659926 |
'''
Title : Shape and Reshape
Subdomain : Numpy
Domain : Python
Author : codeperfectplus
Created : 06 May 2020
'''
import numpy as np
values = input().split()  # expects exactly 9 space-separated integers
grid = np.array(values, dtype=int).reshape(3, 3)
print(grid)
| HackerRank/PythonHackerRankSolutions/Numpy/ShapeandReshape.py | 235 | Title : Shape and Reshape
Subdomain : Numpy
Domain : Python
Author : codeperfectplus
Created : 06 May 2020 | 118 | en | 0.483868 |
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import enzoTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
assert_greater_than,
assert_greater_than_or_equal,
)
class WalletEncryptionTest(enzoTestFramework):
    """Functional test of wallet encryption RPCs: encryptwallet,
    walletpassphrase, walletlock, and walletpassphrasechange."""
    def set_test_params(self):
        # Single node on a fresh chain; no peers needed for wallet RPCs.
        self.setup_clean_chain = True
        self.num_nodes = 1
    def run_test(self):
        passphrase = "WalletPassphrase"
        passphrase2 = "SecondWalletPassphrase"
        # Make sure the wallet isn't encrypted first
        # (WIF private keys here start with "c" and are 52 chars long).
        address = self.nodes[0].getnewaddress()
        privkey = self.nodes[0].dumpprivkey(address)
        assert_equal(privkey[:1], "c")
        assert_equal(len(privkey), 52)
        # Encrypt the wallet (node restarts itself; restart it here).
        self.nodes[0].node_encrypt_wallet(passphrase)
        self.start_node(0)
        # Test that the wallet is encrypted
        assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
        # Check that walletpassphrase works
        self.nodes[0].walletpassphrase(passphrase, 2)
        assert_equal(privkey, self.nodes[0].dumpprivkey(address))
        # Check that the timeout is right (2s unlock must have expired)
        time.sleep(2)
        assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
        # Test wrong passphrase
        assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
        # Test walletlock (re-locks immediately despite the long timeout)
        self.nodes[0].walletpassphrase(passphrase, 84600)
        assert_equal(privkey, self.nodes[0].dumpprivkey(address))
        self.nodes[0].walletlock()
        assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
        # Test wallet already unlocked
        self.nodes[0].walletpassphrase(passphrase, 12000, True)
        assert_raises_rpc_error(-17, "Wallet is already unlocked", self.nodes[0].walletpassphrase, passphrase, 100, True)
        self.nodes[0].walletlock()
        # Test passphrase changes (old passphrase must stop working)
        self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
        assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
        self.nodes[0].walletpassphrase(passphrase2, 10)
        assert_equal(privkey, self.nodes[0].dumpprivkey(address))
        self.nodes[0].walletlock()
        # Test timeout bounds
        assert_raises_rpc_error(-8, "Timeout cannot be negative.", self.nodes[0].walletpassphrase, passphrase2, -10)
        # Check the timeout
        # Check a time less than the limit
        # (unlocked_until should land within a 5-second window of the request)
        MAX_VALUE = 100000000
        expected_time = int(time.time()) + MAX_VALUE - 600
        self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE - 600)
        actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
        assert_greater_than_or_equal(actual_time, expected_time)
        assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
        # Check a time greater than the limit (must be clamped to MAX_VALUE)
        expected_time = int(time.time()) + MAX_VALUE - 1
        self.nodes[0].walletpassphrase(passphrase2, MAX_VALUE + 1000)
        actual_time = self.nodes[0].getwalletinfo()['unlocked_until']
        assert_greater_than_or_equal(actual_time, expected_time)
        assert_greater_than(expected_time + 5, actual_time) # 5 second buffer
if __name__ == '__main__':
WalletEncryptionTest().main()
| test/functional/wallet_encryption.py | 3,797 | Test Wallet encryption
!/usr/bin/env python3 Copyright (c) 2016-2017 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Make sure the wallet isn't encrypted first Encrypt the wallet Test that the wallet is encrypted Check that walletpassphrase works Check that the timeout is right Test wrong passphrase Test walletlock Test wallet already unlocked Test passphrase changes Test timeout bounds Check the timeout Check a time less than the limit 5 second buffer Check a time greater than the limit 5 second buffer | 622 | en | 0.752559 |
#!/usr/bin/env python3
# Copyright (c) 2017 The Eurodollar Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test deprecation of RPC calls."""
from test_framework.test_framework import EurodollarTestFramework
from test_framework.util import assert_raises_rpc_error
class DeprecatedRpcTest(EurodollarTestFramework):
    """Check that deprecated RPCs error by default and work with -deprecatedrpc=<name>."""
    def set_test_params(self):
        # Node 0: default behaviour; node 1: deprecated RPCs re-enabled.
        self.num_nodes = 2
        self.setup_clean_chain = True
        self.extra_args = [[], ["-deprecatedrpc=estimatefee", "-deprecatedrpc=createmultisig"]]
    def run_test(self):
        self.log.info("estimatefee: Shows deprecated message")
        assert_raises_rpc_error(-32, 'estimatefee is deprecated', self.nodes[0].estimatefee, 1)
        self.log.info("Using -deprecatedrpc=estimatefee bypasses the error")
        self.nodes[1].estimatefee(1)
        self.log.info("Make sure that -deprecatedrpc=createmultisig allows it to take addresses")
        # Node 0 rejects a plain address (wants a pubkey); node 1 accepts it.
        assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, [self.nodes[0].getnewaddress()])
        self.nodes[1].createmultisig(1, [self.nodes[1].getnewaddress()])
# Standard functional-test entry point: instantiate the test case and run it.
if __name__ == '__main__':
    DeprecatedRpcTest().main()
| test/functional/rpc_deprecated.py | 1,268 | Test deprecation of RPC calls.
!/usr/bin/env python3 Copyright (c) 2017 The Eurodollar Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. | 236 | en | 0.567127 |
#
# PySNMP MIB module CISCO-EVC-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-EVC-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:57:43 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
CiscoCosList, = mibBuilder.importSymbols("CISCO-TC", "CiscoCosList")
ifIndex, InterfaceIndexOrZero = mibBuilder.importSymbols("IF-MIB", "ifIndex", "InterfaceIndexOrZero")
VlanId, VlanIdOrNone = mibBuilder.importSymbols("Q-BRIDGE-MIB", "VlanId", "VlanIdOrNone")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, Unsigned32, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Bits, IpAddress, MibIdentifier, TimeTicks, Gauge32, iso, ModuleIdentity, NotificationType, Counter64, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Unsigned32", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Bits", "IpAddress", "MibIdentifier", "TimeTicks", "Gauge32", "iso", "ModuleIdentity", "NotificationType", "Counter64", "Counter32")
RowStatus, TruthValue, MacAddress, StorageType, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TruthValue", "MacAddress", "StorageType", "DisplayString", "TextualConvention")
# MODULE-IDENTITY for CISCO-EVC-MIB, registered at 1.3.6.1.4.1.9.9.613
# (under the ciscoMgmt arc imported from CISCO-SMI).
ciscoEvcMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 613))
ciscoEvcMIB.setRevisions(('2012-05-21 00:00', '2008-05-01 00:00', '2007-12-20 00:00',))
# NOTE(review): standard pysnmp MibBuilder does not expose a 'version'
# attribute, so getattr() falls back to (0, 0, 0) and this gate normally
# evaluates False -- presumably a guard because setRevisionsDescriptions()
# only exists in newer pysnmp releases; confirm against the deployed
# pysnmp version.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: ciscoEvcMIB.setRevisionsDescriptions(('- Added following objects to cevcSITable: * cevcSICreationType * cevcSIType - Added following objects to cevcSIForwardBdTable: * cevcSIForwardBdNumberBase * cevcSIForwardBdNumber1kBitmap * cevcSIForwardBdNumber2kBitmap * cevcSIForwardBdNumber3kBitmap * cevcSIForwardBdNumber4kBitmap - Added MacSecurityViolation OID subtree and following objects: * cevcMacAddress * cevcMaxMacConfigLimit * cevcSIID - Deprecated cevcEvcNotificationGroup and added cevcEvcNotificationGroupRev1 and added cevcMacSecurityViolationNotification - Deprecated cevcSIGroup and added cevcSIGroupRev1 and added cevcSICreationType and cevcSIType - Deprecated cevcSIForwardGroup and added cevcSIForwardGroupRev1 and added the new objects mentioned in cevcSIForwardBdTable - Added CevcMacSecurityViolationCause Textual convention - Added new ciscoEvcMIBComplianceRev2', '- Added following enums to cevcSIOperStatus: * deleted(4) * errorDisabled(5) * unknown(6) - Added following named bits to cevcSIMatchEncapValid: * payloadTypes(3) * priorityCos(4) * dot1qNativeVlan(5) * dot1adNativeVlan(6) * encapExact(7) - The Object cevcSIMatchEncapPayloadType is replaced by new object cevcSIMatchEncapPayloadTypes to support multiple payload types for service instance match criteria. - Added new object cevcSIMatchEncapPriorityCos to cevcSIMatchEncapTable. - Added new Compliance ciscoEvcMIBComplianceRev1. - Added new Object Group cevcSIMatchCriteriaGroupRev1. - Miscellaneous updates/corrections.', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoEvcMIB.setLastUpdated('201205210000Z')
if mibBuilder.loadTexts: ciscoEvcMIB.setOrganization('Cisco Systems, Inc.')
if mibBuilder.loadTexts: ciscoEvcMIB.setContactInfo('Cisco Systems Customer Service Postal: 170 W Tasman Drive San Jose, CA 95134 USA Tel: +1 800 553-NETS E-mail: cs-ethermibs@cisco.com')
# Module DESCRIPTION text (includes an ASCII-art EVC/UNI diagram and an
# acronym glossary). In the source dump this single setDescription() call
# was split across two physical lines inside the double-quoted literal,
# which is not valid Python; re-joined here into one statement with an
# explicit \n escape preserving the line break in the string value.
if mibBuilder.loadTexts: ciscoEvcMIB.setDescription("Metro Ethernet services can support a wide range of applications and subscriber needs easily, efficiently and cost-effectively. Using standard Ethernet interfaces, subscribers can set up secure, private Ethernet Virtual Connections, to connect their sites together and connect to business partners, suppliers and the Internet. This MIB module defines the managed objects and notifications describing Ethernet Virtual Connections. Ethernet Virtual Connections (EVC), are defined by the Metro Ethernet Forum (MEF), as an association between two or more UNIs. Frames within an EVC can only be exchanged among the associated UNIs. Frames sent into the MEN via a particular UNI must not be delivered back to the UNI from which it originated. Along an EVC path, there are demarcation flow points on associated ingress and egress interface, of every device, through which the EVC passes. A service instance represents these flow points where a service passes through an interface. From an operational perspective, a service instance serves three purposes: 1. Defines the instance of a particular EVC service on a specific interface and identifies all frames that belongs to that particular service/flow. 2. To provide the capability of applying the configured features to those frames belonging to the service. 3. To optionally define how to forward those frames in the data-path. The association of a service instance to an EVC depicts an instance of an Ethernet flow on a particular interface for an end-to-end (UNI-to-UNI) Ethernet service for a subscriber. The following diagram illustrates the association of EVC, UNIs and service instances. UNI physical ports are depicted as 'U', and service instances as 'x'. \nCE MEN MEN CE ------- ------- ------- ------- | | | | () | | | | | |--------Ux x|--( )--|x xU--------| | | | | | () | | | | ------- ------- ------- ------- ^ ^ | | -------- EVC --------- This MIB module addresses the functional areas of network management for EVC, including: The operational mode for interfaces that are providing Ethernet service(s). The service attributes regarding an interface behaving as UNI, such as CE-VLAN mapping and layer 2 control protocol (eg. stp, vtp, cdp) processing. The provisioning of service instances to define flow points for an Ethernet service. The operational status of EVCs for notifications of status changes, and EVC creation and deletion. Definition of terms and acronyms: B-Tag: Backbone Tag field in Ethernet 802.1ah frame CE: Customer Edge CE-VLAN: Customer Edge VLAN CoS: Class Of Service EVC: Ethernet Virtual Connection I-SID: Service Instance Identifier field in Ethernet 802.1ah frame MAC: Media Access Control MEN: Metro Ethernet Network NNI: Network to Network Interface OAM: Operations Administration and Management PPPoE: Point-to-Point Protocol over Ethernet Service frame: An Ethernet frame transmitted across the UNI toward the service provider or an Ethernet frame transmitted across the UNI toward the Subscriber. Service Instance: A flow point of an Ethernet service Service provider: The organization providing Ethernet service(s). Subscriber: The organization purchasing and/or using Ethernet service(s). UNI: User Network Interface The physical demarcation point between the responsibility of the service provider and the responsibility of the Subscriber. UNI-C: User Network Interface, subscriber side UNI-N: User Network Interface, service provider side VLAN: Virtual Local Area Network")
# OID registration skeleton: notification (.0), object (.1) and conformance
# (.2) subtrees of the MIB, followed by the object sub-groups (system, port,
# EVC, service instance, notification config, MAC-security violation).
ciscoEvcMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 0))
ciscoEvcMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1))
ciscoEvcMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 2))
cevcSystem = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 1))
cevcPort = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2))
cevcEvc = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3))
cevcServiceInstance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4))
cevcEvcNotificationConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 5))
cevcMacSecurityViolation = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6))
class CevcMacSecurityViolationCauseType(TextualConvention, Integer32):
    """Textual convention: cause code of a MAC security violation.

    Enumerated integer restricted to the four named values below.
    Generated by pysmi from the MIB source; the ``description`` text is
    copied verbatim from the ASN.1 definition.
    """
    description = "An integer value which identifies the cause for the MAC Security Violation. If the system MAC Address limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedSystemLimit' value. If the Bridge domain limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedBdLimit' value. If the Service Instance limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedSILimit' value. If the MAC address is present in the Black list then cevcMacSecurityViolationCauseType will contain 'blackListDeny' value."
    status = 'current'
    # Only the four enumerated integers are valid values.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
    namedValues = NamedValues(("exceedSystemLimit", 1), ("exceedBdLimit", 2), ("exceedSILimit", 3), ("blackListDeny", 4))
class CiscoEvcIndex(TextualConvention, Unsigned32):
    """Textual convention: unique EVC identifier, range 1..4294967295."""
    description = 'An integer-value which uniquely identifies the EVC.'
    status = 'current'
    # Zero is excluded; see CiscoEvcIndexOrZero for the 0-permitting variant.
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4294967295)
class CiscoEvcIndexOrZero(TextualConvention, Unsigned32):
    """Textual convention: EVC identifier or 0 (= not configured/assigned)."""
    # NOTE(review): '1-429496725' in the description below looks like a typo
    # for 1-4294967295 (cf. the subtypeSpec range); the text is copied
    # verbatim from the MIB source, so it is left untouched here.
    description = "This textual convention is an extension to textual convention 'CiscoEvcIndex'. It includes the value of '0' in addition to the range of 1-429496725. Value of '0' indicates that the EVC has been neither configured nor assigned."
    status = 'current'
    subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 4294967295)
class CevcL2ControlProtocolType(TextualConvention, Integer32):
    """Textual convention: layer-2 control protocol type (cdp, stp, ...)."""
    description = "Defines the different types of layer 2 control protocols: 'other' None of the following. 'cdp' Cisco Discovery Protocol. 'dtp' Dynamic Trunking Protocol. 'pagp' Port Aggregration Protocol. 'udld' UniDirectional Link Detection. 'vtp' Vlan Trunking Protocol. 'lacp' Link Aggregation Control Protocol. 'dot1x' IEEE 802.1x 'stp' Spanning Tree Protocol."
    status = 'current'
    # One enumerated integer per protocol; see namedValues for the mapping.
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))
    namedValues = NamedValues(("other", 1), ("cdp", 2), ("dtp", 3), ("pagp", 4), ("udld", 5), ("vtp", 6), ("lacp", 7), ("dot1x", 8), ("stp", 9))
class ServiceInstanceTarget(TextualConvention, OctetString):
    """Textual convention: generic service-instance target (0..40 octets).

    Always interpreted in the context of an accompanying
    ServiceInstanceTargetType value.
    """
    description = "Denotes a generic service instance target. An ServiceInstanceTarget value is always interpreted within the context of an ServiceInstanceTargetType value. Every usage of the ServiceInstanceTarget textual convention is required to specify the ServiceInstanceTargetType object which provides the context. It is suggested that the ServiceInstanceTargetType object is logically registered before the object(s) which use the ServiceInstanceTarget textual convention if they appear in the same logical row. The value of an ServiceInstanceTarget object must always be consistent with the value of the associated ServiceInstanceTargetType object. Attempts to set an ServiceInstanceTarget object to a value which is inconsistent with the associated ServiceInstanceTargetType must fail with an inconsistentValue error. When this textual convention is used as the syntax of an index object, there may be issues with the limit of 128 sub-identifiers specified in SMIv2, STD 58. In this case, the object definition MUST include a 'SIZE' clause to limit the number of potential instance sub-identifiers."
    status = 'current'
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(0, 40)
class ServiceInstanceTargetType(TextualConvention, Integer32):
    """Textual convention: discriminator for ServiceInstanceTarget values.

    other(1) or interface(2); must always be kept consistent with the
    paired ServiceInstanceTarget object.
    """
    description = "Defines the type of interface/media to which a service instance is attached. 'other' None of the following. This value MUST be used if the value of the corresponding ServiceInstanceTarget object is a zero-length string. 'interface' Service instance is attached to the the interface defined by ServiceInstanceInterface textual convention. Each definition of a concrete ServiceInstanceTargetType value must be accompanied by a definition of a textual convention for use with that ServiceInstanceTargetType. To support future extensions, the ServiceInstanceTargetType textual convention SHOULD NOT be sub-typed in object type definitions. It MAY be sub-typed in compliance statements in order to require only a subset of these target types for a compliant implementation. Implementations must ensure that ServiceInstanceTargetType objects and any dependent objects (e.g. ServiceInstanceTarget objects) are consistent. An inconsistentValue error must be generated if an attempt to change an ServiceInstanceTargetType object would, for example, lead to an undefined ServiceInstanceTarget value. In particular, ServiceInstanceTargetType/ServiceInstanceTarget pairs must be changed together if the service instance taget type changes."
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
    namedValues = NamedValues(("other", 1), ("interface", 2))
class ServiceInstanceInterface(TextualConvention, OctetString):
    """Textual convention: 4-octet network-byte-order ifIndex of the interface
    a service instance is attached to (used when the target type is
    interface(2)). Auto-generated from CISCO-EVC-MIB.
    """
    description = "This textual convention indicates the ifIndex which identifies the interface that the service instance is attached, for which the corresponding ifType has the value of (but not limited to) 'ethernetCsmacd'. octets contents encoding 1-4 ifIndex network-byte order The corresponding ServiceInstanceTargetType value is interface(2)."
    status = 'current'
    # Exactly 4 octets: the encoded ifIndex.
    subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4)
    fixedLength = 4
# --- MAC security violation scalars (OID subtree ...9.9.613.1.6) ---
# Per their DESCRIPTION clauses, these objects report the offending MAC
# address, configured limit, service instance ID and violation cause for a
# MAC security violation notification. `mibBuilder.loadTexts` gates loading
# of the (large) DESCRIPTION strings to save memory when texts are disabled.
cevcMacAddress = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcMacAddress.setStatus('current')
if mibBuilder.loadTexts: cevcMacAddress.setDescription('This object indicates the MAC Address which has violated the Mac security rules.')
cevcMaxMacConfigLimit = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6, 2), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cevcMaxMacConfigLimit.setStatus('current')
if mibBuilder.loadTexts: cevcMaxMacConfigLimit.setDescription('This object specifies the maximum MAC configuration limit. This is also sent as a part of MAC security violation notification. Every platform has their own forwarding table limitation. User can also set the maximum MAC configuration limit and if the limit set by user is not supported by platform then the object returns error.')
cevcSIID = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcSIID.setStatus('current')
if mibBuilder.loadTexts: cevcSIID.setDescription('This object indicates the service instance ID for the MAC security violation notification.')
cevcViolationCause = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 6, 4), CevcMacSecurityViolationCauseType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcViolationCause.setStatus('current')
if mibBuilder.loadTexts: cevcViolationCause.setDescription("This object indicates the MAC security violation cause. When the system MAC Address limit is exceeded, the cevcMacSecurityViolationCause will contain 'exceedSystemLimit' value. When the Bridge domain limit is exceeded, the cevcMacSecurityViolationCause will contain 'exceedBdLimit' value. When the Service Instance limit is exceeded, the cevcMacSecurityViolationCause will contain 'exceedSILimit' value. If the MAC address is present in the Black list then cevcMacSecurityViolationCause will contain 'blackListDeny' value.")
# --- System-wide EVC capacity scalars (OID subtree ...9.9.613.1.1) ---
# Read-only gauges: maximum EVCs supported vs. EVCs currently configured.
cevcMaxNumEvcs = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 1, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcMaxNumEvcs.setStatus('current')
if mibBuilder.loadTexts: cevcMaxNumEvcs.setDescription('This object indicates the maximum number of EVCs that the system supports.')
cevcNumCfgEvcs = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcNumCfgEvcs.setStatus('current')
if mibBuilder.loadTexts: cevcNumCfgEvcs.setDescription('This object indicates the actual number of EVCs currently configured on the system.')
# --- cevcPortTable (...9.9.613.1.2.1): per-port mode and limits ---
# Indexed by ifIndex (sparse augmentation of ifTable); one row per Ethernet
# port capable of providing Ethernet services.
cevcPortTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1), )
if mibBuilder.loadTexts: cevcPortTable.setStatus('current')
if mibBuilder.loadTexts: cevcPortTable.setDescription("This table provides the operational mode and configuration limitations of the physical interfaces (ports) that provide Ethernet services for the MEN. This table has a sparse depedent relationship on the ifTable, containing a row for each ifEntry having an ifType of 'ethernetCsmacd' capable of supporting Ethernet services.")
cevcPortEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cevcPortEntry.setStatus('current')
if mibBuilder.loadTexts: cevcPortEntry.setDescription("This entry represents a port, a physical point, at which signals can enter or leave the network en route to or from another network to provide Ethernet services for the MEN. The system automatically creates an entry for each ifEntry in the ifTable having an ifType of 'ethernetCsmacd' capable of supporting Ethernet services and entries are automatically destroyed when the corresponding row in the ifTable is destroyed.")
# Column 1: uni(1)/nni(2) port mode (read-write).
cevcPortMode = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("uni", 1), ("nni", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cevcPortMode.setStatus('current')
if mibBuilder.loadTexts: cevcPortMode.setDescription("Port denotes the physcial interface which can provide Ethernet services. This object indicates the mode of the port and its operational behaviour in the MEN. 'uni' User Network Interface The port resides on the interface between the end user and the network. Additional information related to the UNI is included in cevcUniTable. 'nni' Network to Network Interface. The port resides on the interface between two networks.")
cevcPortMaxNumEVCs = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcPortMaxNumEVCs.setStatus('current')
if mibBuilder.loadTexts: cevcPortMaxNumEVCs.setDescription('This object indicates the maximum number of EVCs that the interface can support.')
cevcPortMaxNumServiceInstances = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcPortMaxNumServiceInstances.setStatus('current')
if mibBuilder.loadTexts: cevcPortMaxNumServiceInstances.setDescription('This object indicates the maximum number of service instances that the interface can support.')
# --- cevcUniTable (...9.9.613.1.2.2): UNI identity and service attributes ---
# Indexed by ifIndex; sparse-dependent on cevcPortTable rows with mode 'uni'.
cevcUniTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2), )
if mibBuilder.loadTexts: cevcUniTable.setStatus('current')
if mibBuilder.loadTexts: cevcUniTable.setDescription("This table contains a list of UNIs locally configured on the system. This table has a sparse dependent relationship on the cevcPortTable, containing a row for each cevcPortEntry having a cevcPortMode column value 'uni'.")
cevcUniEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cevcUniEntry.setStatus('current')
if mibBuilder.loadTexts: cevcUniEntry.setDescription("This entry represents an UNI and its service attributes. The system automatically creates an entry when the system or the EMS/NMS creates a row in the cevcPortTable with a cevcPortMode of 'uni'. Likewise, the system automatically destroys an entry when the system or the EMS/NMS destroys the corresponding row in the cevcPortTable.")
cevcUniIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2, 1, 1), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcUniIdentifier.setReference("MEF 16, 'Ethernet Local Management Interface (E-LMI)', January 2006")
if mibBuilder.loadTexts: cevcUniIdentifier.setStatus('current')
if mibBuilder.loadTexts: cevcUniIdentifier.setDescription('This object specifies a string-value assigned to a UNI for identification. When the UNI identifier is configured by the system or the EMS/NMS, it should be unique among all UNIs for the MEN. If the UNI identifier value is not specified, the value of the cevcUniIdentifier column is a zero-length string.')
# Column 2: dot1q(1)/dot1ad(2), default dot1q (the trailing .clone('dot1q')).
cevcUniPortType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dot1q", 1), ("dot1ad", 2))).clone('dot1q')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcUniPortType.setStatus('current')
if mibBuilder.loadTexts: cevcUniPortType.setDescription("This object specifies the UNI port type. 'dot1q' The UNI port is an IEEE 802.1q port. 'dot1ad' The UNI port is an IEEE 802.1ad port.")
cevcUniServiceAttributes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 2, 1, 3), Bits().clone(namedValues=NamedValues(("serviceMultiplexing", 0), ("bundling", 1), ("allToOneBundling", 2))).clone(namedValues=NamedValues(("serviceMultiplexing", 0), ("bundling", 1)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcUniServiceAttributes.setStatus('current')
if mibBuilder.loadTexts: cevcUniServiceAttributes.setDescription("This object specifies the UNI service attributes. 'serviceMultiplexing' This bit specifies whether the UNI supports multiple EVCs. Point-to-Point EVCs and Multipoint-to-Multipoint EVCs may be multiplexed in any combination at the UNI if this bit is set to '1'. 'bundling' This bit specifies whether the UNI has the bundling attribute configured. If this bit is set to '1', more than one CE-VLAN ID can map to a particular EVC at the UNI. 'allToOneBundling' This bit specifies whether the UNI has the all to one bundling attribute. If this bit is set to '1', all CE-VLAN IDs map to a single EVC at the UNI. To summarize the valid combinations of serviceMultiplexing(0), bundling(1) and allToOneBundling(2) bits for an UNI, consider the following diagram: VALID COMBINATIONS +---------------+-------+-------+-------+-------+-------+ |UNI ATTRIBUTES | 1 | 2 | 3 | 4 | 5 | +---------------+-------+------+------------------------+ |Service | | | | | | |Multiplexing | | Y | Y | | | | | | | | | | +---------------+-------+-------+-------+-------+-------+ | | | | | | | |Bundling | | | Y | Y | | | | | | | | | +---------------+-------+-------+-------+-------+-------+ |All to One | | | | | | |Bundling | | | | | Y | | | | | | | | +---------------+-------+-------+------ +-------+-------+")
# --- cevcPortL2ControlProtocolTable (...9.9.613.1.2.3) ---
# Per-UNI, per-L2-protocol processing action; indexed by (ifIndex, protocol type).
cevcPortL2ControlProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 3), )
if mibBuilder.loadTexts: cevcPortL2ControlProtocolTable.setStatus('current')
if mibBuilder.loadTexts: cevcPortL2ControlProtocolTable.setDescription('This table lists the layer 2 control protocol processing attributes at UNI ports. This table has an expansion dependent relationship on the cevcUniTable, containing zero or more rows for each UNI.')
cevcPortL2ControlProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-EVC-MIB", "cevcPortL2ControlProtocolType"))
if mibBuilder.loadTexts: cevcPortL2ControlProtocolEntry.setStatus('current')
if mibBuilder.loadTexts: cevcPortL2ControlProtocolEntry.setDescription('This entry represents the layer 2 control protocol processing at the UNI. The system automatically creates an entry for each layer 2 control protocol type when an entry is created in the cevcUniTable, and entries are automatically destroyed when the system destroys the corresponding row in the cevcUniTable.')
# Index column: not-accessible in SMI terms (no setMaxAccess call here).
cevcPortL2ControlProtocolType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 3, 1, 1), CevcL2ControlProtocolType())
if mibBuilder.loadTexts: cevcPortL2ControlProtocolType.setStatus('current')
if mibBuilder.loadTexts: cevcPortL2ControlProtocolType.setDescription('This object indicates the type of layer 2 control protocol service frame as denoted by the value of cevcPortL2ControlProtocolAction column.')
cevcPortL2ControlProtocolAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("discard", 1), ("peer", 2), ("passToEvc", 3), ("peerAndPassToEvc", 4))).clone('discard')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcPortL2ControlProtocolAction.setStatus('current')
if mibBuilder.loadTexts: cevcPortL2ControlProtocolAction.setDescription("This object specifies the action to be taken for the given layer 2 control protocol service frames which matches the cevcPortL2ControlProtocolType, including: 'discard' The port must discard all ingress service frames carrying the layer 2 control protocol service frames and the port must not generate any egress service frames carrying the layer 2 control protocol service frames. When this action is set at the port, an EVC cannot process the layer 2 control protocol service frames. 'peer' The port must act as a peer, meaning it actively participates with the Customer Equipment, in the operation of the layer 2 control protocol service frames. An example of this is port authentication service at the UNI with 802.1x or enhanced link OAM functionality by peering at the UNI with link OAM (IEEE 802.3ah). When this action is set at the port, an EVC cannot process the layer 2 control protocol service frames. 'passToEvc' The disposition of the service frames which are layer 2 control protocol service frames must be determined by the layer 2 control protocol action attribute of the EVC, (see cevcSIL2ControlProtocolAction for further details). 'peerAndPassToEvc' The layer 2 control protocol service frames will be peered at the port and also passed to one or more EVCs for tunneling. An example of this possibility is where an 802.1x authentication frame is peered at the UNI for UNI-based authentication, but also passed to a given EVC for customer end-to-end authentication.")
# --- cevcUniCEVlanEvcTable (...9.9.613.1.2.4) ---
# Maps CE-VLAN ranges to EVCs per UNI; indexed by (ifIndex, EVC index,
# beginning VLAN), with the ending VLAN as the only accessible column.
cevcUniCEVlanEvcTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4), )
if mibBuilder.loadTexts: cevcUniCEVlanEvcTable.setStatus('current')
if mibBuilder.loadTexts: cevcUniCEVlanEvcTable.setDescription('This table contains for each UNI, a list of EVCs and the association of CE-VLANs to the EVC. The CE-VLAN mapping is locally significant to the UNI. This table has an expansion dependent relationship on the cevcUniTable, containing zero or more rows for each UNI.')
cevcUniCEVlanEvcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-EVC-MIB", "cevcUniEvcIndex"), (0, "CISCO-EVC-MIB", "cevcUniCEVlanEvcBeginningVlan"))
if mibBuilder.loadTexts: cevcUniCEVlanEvcEntry.setStatus('current')
if mibBuilder.loadTexts: cevcUniCEVlanEvcEntry.setDescription('This entry represents an EVC and the CE-VLANs that are mapped to it at an UNI. For example, if CE-VLANs 10, 20-30, 40 are mapped to an EVC indicated by cevcUniEvcIndex = 1, at an UNI with ifIndex = 2, this table will contain following rows to represent above CE-VLAN map: cevcUniCEVlanEvcEndingVlan.2.1.10 = 0 cevcUniCEVlanEvcEndingVlan.2.1.20 = 30 cevcUniCEVlanEvcEndingVlan.2.1.40 = 0 The system automatically creates an entry when the system creates an entry in the cevcUniTable and an entry is created in cevcSICEVlanTable for a service instance which is attached to an EVC on this UNI. Likewise, the system automatically destroys an entry when the system or the EMS/NMS destroys the corresponding row in the cevcUniTable or in the cevcSICEVlanTable.')
cevcUniEvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4, 1, 1), CiscoEvcIndex())
if mibBuilder.loadTexts: cevcUniEvcIndex.setStatus('current')
if mibBuilder.loadTexts: cevcUniEvcIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies the EVC attached at an UNI.')
cevcUniCEVlanEvcBeginningVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4, 1, 2), VlanId())
if mibBuilder.loadTexts: cevcUniCEVlanEvcBeginningVlan.setStatus('current')
if mibBuilder.loadTexts: cevcUniCEVlanEvcBeginningVlan.setDescription("If cevcUniCEVlanEvcEndingVlan is '0', then this object indicates a single VLAN in the list. If cevcUniCEVlanEvcEndingVlan is not '0', then this object indicates the first VLAN in a range of VLANs.")
cevcUniCEVlanEvcEndingVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 2, 4, 1, 3), VlanIdOrNone()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcUniCEVlanEvcEndingVlan.setStatus('current')
if mibBuilder.loadTexts: cevcUniCEVlanEvcEndingVlan.setDescription("This object indicates the last VLAN in a range of VLANs. If the row does not describe a range, then the value of this column must be '0'.")
# --- cevcEvcTable (...9.9.613.1.3.1): EVC configuration ---
# Read-create table managed via cevcEvcRowStatus; indexed by cevcEvcIndex.
cevcEvcTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1), )
if mibBuilder.loadTexts: cevcEvcTable.setStatus('current')
if mibBuilder.loadTexts: cevcEvcTable.setDescription('This table contains a list of EVCs and their service attributes.')
cevcEvcEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcEvcIndex"))
if mibBuilder.loadTexts: cevcEvcEntry.setStatus('current')
if mibBuilder.loadTexts: cevcEvcEntry.setDescription("This entry represents the EVC configured on the system and its service atrributes. Entries in this table may be created and deleted via the cevcEvcRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcEvcRowStatus column to 'createAndGo'or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcEvcRowStatus column to 'destroy'.")
cevcEvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 1), CiscoEvcIndex())
if mibBuilder.loadTexts: cevcEvcIndex.setStatus('current')
if mibBuilder.loadTexts: cevcEvcIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies the EVC.')
cevcEvcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcEvcRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcEvcTable. cevcEvcIdentifier column must have a valid value before a row can be set to 'active'. Writable objects in this table can be modified while the value of cevcEvcRowStatus column is 'active'. An entry cannot be deleted if there exists a service instance which is referring to the cevcEvcEntry i.e. cevcSIEvcIndex in the cevcSITable has the same value as cevcEvcIndex being deleted.")
# Default storage type is 'volatile' (the .clone('volatile')).
cevcEvcStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcEvcStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcEvcIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(1, 100))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcIdentifier.setReference("MEF 16, 'Ethernet Local Management Interface (E-LMI)', January 2006")
if mibBuilder.loadTexts: cevcEvcIdentifier.setStatus('current')
if mibBuilder.loadTexts: cevcEvcIdentifier.setDescription('This object specifies a string-value identifying the EVC. This value should be unique across the MEN.')
cevcEvcType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("pointToPoint", 1), ("multipointToMultipoint", 2))).clone('pointToPoint')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcType.setStatus('current')
if mibBuilder.loadTexts: cevcEvcType.setDescription("This object specifies the type of EVC: 'pointToPoint' Exactly two UNIs are associated with one another. An ingress service frame at one UNI must not result in an egress service frame at a UNI other than the other UNI in the EVC. 'multipointToMultipoint' Two or more UNIs are associated with one another. An ingress service frame at one UNI must not result in an egress service frame at a UNI that is not in the EVC.")
cevcEvcCfgUnis = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(2, 4294967295)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcEvcCfgUnis.setStatus('current')
if mibBuilder.loadTexts: cevcEvcCfgUnis.setDescription("This object specifies the number of UNIs expected to be configured for the EVC in the MEN. The underlying OAM protocol can use this value of UNIs to determine the EVC operational status, cevcEvcOperStatus. For a Multipoint-to-Multipoint EVC the minimum number of Uni's would be two.")
# --- cevcEvcStateTable (...9.9.613.1.3.2): EVC operational state ---
# One-to-one read-only companion of cevcEvcTable, indexed by cevcEvcIndex.
cevcEvcStateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 2), )
if mibBuilder.loadTexts: cevcEvcStateTable.setStatus('current')
if mibBuilder.loadTexts: cevcEvcStateTable.setDescription('This table lists statical/status data of the EVC. This table has an one-to-one dependent relationship on the cevcEvcTable, containing a row for each EVC.')
cevcEvcStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 2, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcEvcIndex"))
if mibBuilder.loadTexts: cevcEvcStateEntry.setStatus('current')
if mibBuilder.loadTexts: cevcEvcStateEntry.setDescription('This entry represents status atrributes of an EVC. The system automatically creates an entry when the system or the EMS/NMS creates a row in the cevcEvcTable. Likewise, the system automatically destroys an entry when the system or the EMS/NMS destroys the corresponding row in the cevcEvcTable.')
cevcEvcOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("unknown", 1), ("active", 2), ("partiallyActive", 3), ("inactive", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcOperStatus.setStatus('current')
if mibBuilder.loadTexts: cevcEvcOperStatus.setDescription("This object specifies the operational status of the EVC: 'unknown' Not enough information available regarding the EVC to determine the operational status at this time or EVC operational status is undefined. 'active' Fully operational between the UNIs in the EVC. 'partiallyActive' Capable of transferring traffic among some but not all of the UNIs in the EVC. This operational status is applicable only for Multipoint-to-Multipoint EVCs. 'inactive' Not capable of transferring traffic among any of the UNIs in the EVC. This value is derived from data gathered by underlying OAM protocol.")
cevcEvcActiveUnis = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 2, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcActiveUnis.setStatus('current')
if mibBuilder.loadTexts: cevcEvcActiveUnis.setDescription('This object indicates the number of active UNIs for the EVC in the MEN. This value is derived from data gathered by underlying OAM Protocol.')
# --- cevcEvcUniTable (...9.9.613.1.3.3): UNIs (local or remote) per EVC ---
# Indexed by (cevcEvcIndex, cevcEvcUniIndex); cevcEvcLocalUniIfIndex == 0
# marks a remote UNI.
cevcEvcUniTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3), )
if mibBuilder.loadTexts: cevcEvcUniTable.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniTable.setDescription("This table contains a list of UNI's for each EVC configured on the device. The UNIs can be local (i.e. physically located on the system) or remote (i.e. not physically located on the device). For local UNIs, the UNI Id is the same as denoted by cevcUniIdentifier with the same ifIndex value as cevcEvcLocalUniIfIndex. For remote UNIs, the underlying OAM protocol, if capable, provides the UNI Id via its protocol messages. This table has an expansion dependent relationship on the cevcEvcTable, containing a row for each UNI that is in the EVC.")
cevcEvcUniEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcEvcIndex"), (0, "CISCO-EVC-MIB", "cevcEvcUniIndex"))
if mibBuilder.loadTexts: cevcEvcUniEntry.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniEntry.setDescription('This entry represents a UNI, either local or remote, in the EVC. The system automatically creates an entry, when an UNI is attached to the EVC. Entries are automatically destroyed when the system or the EMS/NMS destroys the corresponding row in the cevcEvcTable or when an UNI is removed from the EVC.')
cevcEvcUniIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cevcEvcUniIndex.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies the UNI in an EVC.')
cevcEvcUniId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcUniId.setReference('MEF 16, Ethernet Local Management Interface (E-LMI), January 2006')
if mibBuilder.loadTexts: cevcEvcUniId.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniId.setDescription('This object indicates the string-value identifying the UNI that is in the EVC. For UNI that is local, this value is the same as cevcUniIdentifier for the same ifIndex value as cevcEvcLocalUniIfIndex. For UNI that is not on the system, this value may be derived from the underlying OAM protocol. If the UNI identifier value is not specified for the UNI or it is unknown, the value of the cevcEvcUniId column is a zero-length string.')
cevcEvcUniOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("unknown", 1), ("notReachable", 2), ("up", 3), ("down", 4), ("adminDown", 5), ("localExcessiveError", 6), ("remoteExcessiveError", 7), ("localInLoopback", 8), ("remoteInLoopback", 9), ("localOutLoopback", 10), ("remoteOutLoopback", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcUniOperStatus.setStatus('current')
if mibBuilder.loadTexts: cevcEvcUniOperStatus.setDescription("This object indicates the operational status derived from data gathered by the OAM protocol for an UNI. 'unknown' Status is not known; possible reason could be caused by the OAM protocol has not provided information regarding the UNI. 'notReachable' UNI is not reachable; possible reason could be caused by the OAM protocol messages having not been received for an excessive length of time. 'up' UNI is active, up, and able to pass traffic. 'down' UNI is down and not passing traffic. 'adminDown' UNI has been administratively put in down state. 'localExcessiveError' UNI has experienced excessive number of invalid frames on the local end of the physical link between UNI-C and UNI-N. 'remoteExcessiveError' UNI has experienced excessive number of invalid frames on the remote side of the physical connection between UNI-C and UNI-N. 'localInLoopback' UNI is loopback on the local end of the physical link between UNI-C and UNI-N. 'remoteInLoopback' UNI is looped back on the remote end of the link between UNI-C and UNI-N. 'localOutLoopback' UNI just transitioned out of loopback on the local end of the physcial link between UNI-C and UNI-N. 'remoteOutLoopback' UNI just transitioned out of loopback on the remote end of the physcial link between UNI-C and UNI-N.")
cevcEvcLocalUniIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 3, 3, 1, 4), InterfaceIndexOrZero()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcEvcLocalUniIfIndex.setStatus('current')
if mibBuilder.loadTexts: cevcEvcLocalUniIfIndex.setDescription("When the UNI is local on the system, this object specifies the ifIndex of the UNI. The value '0' of this column indicates remote UNI.")
# --- cevcSITable (...9.9.613.1.4.1): service instance configuration ---
# Read-create table managed via cevcSIRowStatus; indexed by cevcSIIndex.
cevcSITable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1), )
if mibBuilder.loadTexts: cevcSITable.setStatus('current')
if mibBuilder.loadTexts: cevcSITable.setDescription('This table lists each service instance and its service attributes.')
cevcSIEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"))
if mibBuilder.loadTexts: cevcSIEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIEntry.setDescription("This entry represents a service instance configured on the system and its service attributes. Entries in this table may be created and deleted via the cevcSIRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcSIRowStatus column to 'createAndGo'or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcSIRowStatus column to 'destroy'.")
cevcSIIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cevcSIIndex.setStatus('current')
if mibBuilder.loadTexts: cevcSIIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies a service instance. An implementation MAY assign an ifIndex-value assigned to the service instance to cevcSIIndex.')
cevcSIRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSITable. This object cannot be set to 'active' until following corresponding objects are assigned to valid values: - cevcSITargetType - cevcSITarget - cevcSIName - cevcSIType Following writable objects in this table cannot be modified while the value of cevcSIRowStatus is 'active': - cevcSITargetType - cevcSITarget - cevcSIName - cevcSIType Objects in this table and all other tables that have the same cevcSIIndex value as an index disappear when cevcSIRowStatus is set to 'destroy'.")
cevcSIStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
# Target type/target use the textual conventions defined above in this module.
cevcSITargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 4), ServiceInstanceTargetType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSITargetType.setStatus('current')
if mibBuilder.loadTexts: cevcSITargetType.setDescription('This object indicates the type of interface/media to which a service instance has an attachment.')
cevcSITarget = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 5), ServiceInstanceTarget()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSITarget.setStatus('current')
if mibBuilder.loadTexts: cevcSITarget.setDescription('This object indicates the target to which a service instance has an attachment. If the target is unknown, the value of the cevcSITarget column is a zero-length string.')
cevcSIName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 6), SnmpAdminString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIName.setStatus('current')
if mibBuilder.loadTexts: cevcSIName.setDescription("The textual name of the service instance. The value of this column should be the name of the component as assigned by the local interface/media type and should be be suitable for use in commands entered at the device's 'console'. This might be text name, such as 'si1' or a simple service instance number, such as '1', depending on the interface naming syntax of the device. If there is no local name or this object is otherwise not applicable, then this object contains a zero-length string.")
cevcSIEvcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 7), CiscoEvcIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIEvcIndex.setStatus('current')
if mibBuilder.loadTexts: cevcSIEvcIndex.setDescription("This object specifies the EVC Index that the service instance is associated. The value of '0' this column indicates that the service instance is not associated to an EVC. If the value of cevcSIEvcIndex column is not '0', there must exist an active row in the cevcEvcTable with the same index value for cevcEvcIndex.")
cevcSIAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2))).clone('up')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIAdminStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIAdminStatus.setDescription("This object specifies the desired state of the Service Instance. 'up' Ready to transfer traffic. When a system initializes, all service instances start with this state. 'down' The service instance is administratively down and is not capable of transferring traffic.")
cevcSIForwardingType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("other", 0), ("bridgeDomain", 1)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardingType.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardingType.setDescription("This object indicates technique used by a service instance to forward service frames. 'other' If the forwarding behavior of a service instance is not defined or unknown, this object is set to other(0). 'bridgeDomain' Bridge domain is used to forward service frames by a service instance. If cevcSIForwardingType is 'bridgeDomain(1)', there must exist an active row in the cevcSIForwardBdTable with the same index value of cevcSIIndex. The object cevcSIForwardBdNumber indicates the identifier of the bridge domain component being used.")
cevcSICreationType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("static", 1), ("dynamic", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcSICreationType.setStatus('current')
if mibBuilder.loadTexts: cevcSICreationType.setDescription("This object specifies whether the service instance created is statically configured by the user or is dynamically created. 'static' If the service instance is configured manually this object is set to static(1). 'dynamic' If the service instance is created dynamically by the first sign of life of an Ethernet frame, then this object is set to dynamic(2) for the service instance.")
cevcSIType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 1, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("regular", 1), ("trunk", 2), ("l2context", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIType.setStatus('current')
if mibBuilder.loadTexts: cevcSIType.setDescription("This object specifies the type of the service instance. It mentions if the service instance is either a regular or trunk or l2context service instance. 'regular' If a service instance is configured without any type specified, then it is a regular service instance. 'trunk' If the service instance is configured with trunk type, then it is a trunk service instance. For a trunk service instance, its Bridge domain IDs are derived from encapsulation VLAN plus an optional offset (refer cevcSIForwardBdNumberBase object). 'l2context' If the service instance is configured with dynamic type, then it is a L2 context service instance. The Ethernet L2 Context is a statically configured service instance which contains the Ethernet Initiator for attracting the first sign of life. In other words, Ethernet L2 Context service instance is used for catching the first sign of life of Ethernet frames to create dynamic Ethernet sessions service instances.")
# --- cevcSIStateTable (1.3.6.1.4.1.9.9.613.1.4.2) ---
# Read-only operational state, one row per service instance; rows are
# created/destroyed by the agent in lockstep with cevcSITable.
cevcSIStateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 2), )
if mibBuilder.loadTexts: cevcSIStateTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIStateTable.setDescription('This table lists statical status data of the service instance. This table has an one-to-one dependent relationship on the cevcSITable, containing a row for each service instance.')
# Indexed by cevcSIIndex only (0 = index value is not implied/fixed-length).
cevcSIStateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 2, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"))
if mibBuilder.loadTexts: cevcSIStateEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIStateEntry.setDescription('This entry represents operational status of a service instance. The system automatically creates an entry when the system or the EMS NMS creates a row in the cevcSITable. Likewise, the system automatically destroys an entry when the system or the EMS NMS destroys the corresponding row in the cevcSITable.')
cevcSIOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("adminDown", 3), ("deleted", 4), ("errorDisabled", 5), ("unknown", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cevcSIOperStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIOperStatus.setDescription("This object indicates the operational status of the Service Instance. 'up' Service instance is fully operational and able to transfer traffic. 'down' Service instance is down and not capable of transferring traffic, and is not administratively configured to be down by management system. 'adminDown' Service instance has been explicitly configured to administratively down by a management system and is not capable of transferring traffic. 'deleted' Service instance has been deleted. 'errorDisabled' Service instance has been shut down due to MAC security violations. 'unknown' Operational status of service instance is unknown or undefined.")
# --- cevcSIVlanRewriteTable (1.3.6.1.4.1.9.9.613.1.4.3) ---
# VLAN tag rewrite configuration per service instance, one row per
# traffic direction (ingress/egress).
cevcSIVlanRewriteTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3), )
if mibBuilder.loadTexts: cevcSIVlanRewriteTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteTable.setDescription("This table lists the rewrite adjustments of the service frame's VLAN tags for service instances. This table has an expansion dependent relationship on the cevcSITable, containing a row for a VLAN adjustment for ingress and egress frames at each service instance.")
# Indexed by (cevcSIIndex, cevcSIVlanRewriteDirection).
cevcSIVlanRewriteEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIVlanRewriteDirection"))
if mibBuilder.loadTexts: cevcSIVlanRewriteEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteEntry.setDescription('Each entry represents the VLAN adjustment for a Service Instance.')
# Index column: no setMaxAccess call, as generated for not-accessible indexes.
cevcSIVlanRewriteDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ingress", 1), ("egress", 2))))
if mibBuilder.loadTexts: cevcSIVlanRewriteDirection.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteDirection.setDescription("This object specifies the VLAN adjustment for 'ingress' frames or 'egress' frames on the service instance.")
cevcSIVlanRewriteRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIVlanRewriteTable. cevcSIVlanRewriteAction and cevcSIVlanRewriteEncapsulation must have valid values before this object can be set to 'active'. Writable objects in this table can be modified while the value of cevcSIVlanRewriteRowStatus column is 'active'.")
# DEFVAL for storage type is 'volatile' (the .clone('volatile')).
cevcSIVlanRewriteStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSIVlanRewriteAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("push1", 1), ("push2", 2), ("pop1", 3), ("pop2", 4), ("translate1To1", 5), ("translate1To2", 6), ("translate2To1", 7), ("translate2To2", 8)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteAction.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteAction.setDescription("This object specifies the rewrite action the device performs for the service instance, including: 'push1' Add cevcSIVlanRewriteVlan1 as the VLAN tag to the service frame. 'push2' Add cevcSIVlanRewriteVlan1 as the outer VLAN tag and cevcSIVlanRewriteVlan2 as the inner VLAN tag of the service frame. 'pop1' Remove the outermost VLAN tag from the service frame. 'pop2' Remove the two outermost VLAN tags from the service frame. 'translate1To1' Replace the outermost VLAN tag with the cevcSIVlanRewriteVlan1 tag. 'translate1To2' Replace the outermost VLAN tag with cevcSIVlanRewriteVlan1 and add cevcSIVlanRewriteVlan2 to the second VLAN tag of the service frame. 'translate2To1' Remove the outermost VLAN tag and replace the second VLAN tag with cevcSIVlanVlanRewriteVlan1. 'translate2To2' Replace the outermost VLAN tag with cevcSIVlanRewriteVlan1 and the second VLAN tag with cevcSIVlanRewriteVlan2.")
cevcSIVlanRewriteEncapsulation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("dot1q", 1), ("dot1ad", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteEncapsulation.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteEncapsulation.setDescription("This object specifies the encapsulation type to process for the service instance. 'dot1q' The IEEE 802.1q encapsulation. 'dot1ad' The IEEE 802.1ad encapsulation.")
cevcSIVlanRewriteVlan1 = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 6), VlanId()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteVlan1.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteVlan1.setDescription("This object specifies the outermost VLAN ID tag of the frame for the service instance. This object is valid only when cevcSIVlanRewriteAction is 'push1', 'push2', 'translate1To1', 'translate1To2', 'translate2To1', or 'translate2To2'.")
cevcSIVlanRewriteVlan2 = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 7), VlanId()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteVlan2.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteVlan2.setDescription("This object specifies the second VLAN ID tag of the frame for the service instance. This object is valid only when cevcSIVlanRewriteAction is 'push2', 'translate1To2', or 'translate2To2'.")
# DEFVAL is 'false' (the .clone('false') on TruthValue).
cevcSIVlanRewriteSymmetric = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 3, 1, 8), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIVlanRewriteSymmetric.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteSymmetric.setDescription("This object is valid only when cevcSIVlanRewriteDirection is 'ingress'. The value 'true' of this column specifies that egress packets are tagged with a VLAN specified by an active row in cevcSIPrimaryVlanTable. There could only be one VLAN value assigned in the cevcSIPrimaryVlanTable, i.e. only one 'active' entry that has the same index value of cevcSIIndex column and corresponding instance of cevcSIPrimaryVlanEndingVlan column has value '0'.")
# --- cevcSIL2ControlProtocolTable (1.3.6.1.4.1.9.9.613.1.4.4) ---
# Per-service-instance handling of layer-2 control protocols; one row
# per (service instance, L2 control protocol type).
cevcSIL2ControlProtocolTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 4), )
if mibBuilder.loadTexts: cevcSIL2ControlProtocolTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIL2ControlProtocolTable.setDescription('This table lists the layer 2 control protocol processing attributes at service instances. This table has an expansion dependent relationship on the cevcSITable, containing a row for each layer 2 control protocol disposition at each service instance.')
cevcSIL2ControlProtocolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 4, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIL2ControlProtocolType"))
if mibBuilder.loadTexts: cevcSIL2ControlProtocolEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIL2ControlProtocolEntry.setDescription('This entry represents the layer 2 control protocol processing at a service instance. The system automatically creates an entry for each layer 2 control protocol type when an entry is created in the cevcSITable, and entries are automatically destroyed when the system destroys the corresponding row in the cevcSITable.')
# Index column (textual convention defined elsewhere in this module).
cevcSIL2ControlProtocolType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 4, 1, 1), CevcL2ControlProtocolType())
if mibBuilder.loadTexts: cevcSIL2ControlProtocolType.setStatus('current')
if mibBuilder.loadTexts: cevcSIL2ControlProtocolType.setDescription('The layer 2 control protocol service frame that the service instance is to process as defined by object cevcSIL2ControlProtocolAction.')
# DEFVAL 'discard' (the trailing .clone('discard')).
cevcSIL2ControlProtocolAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("discard", 1), ("tunnel", 2), ("forward", 3))).clone('discard')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIL2ControlProtocolAction.setStatus('current')
if mibBuilder.loadTexts: cevcSIL2ControlProtocolAction.setDescription("The actions to be taken for a given layer 2 control protocol service frames that matches cevcSIL2ControlProtocolType, including: 'discard' The MEN must discard all ingress service frames carrying the layer 2 control protocol service frames on the EVC and the MEN must not generate any egress service frames carrying the layer 2 control protocol frames on the EVC. 'tunnel' Forward the layer 2 control protocol service frames with the MAC address changed as defined by the individual layer 2 control protocol. The EVC does not process the layer 2 protocol service frames. If a layer 2 control protocol service frame is to be tunneled, all the UNIs in the EVC must be configured to pass the layer 2 control protocol service frames to the EVC, cevcPortL2ControlProtocolAction column has the value of 'passToEvc' or 'peerAndPassToEvc'. 'forward' Forward the layer 2 conrol protocol service frames as data; similar to tunnel but layer 2 control protocol service frames are forwarded without changing the MAC address.")
# --- cevcSICEVlanTable (1.3.6.1.4.1.9.9.613.1.4.5) ---
# CE-VLAN map list per service instance; each row is a single VLAN or a
# VLAN range (ending VLAN == 0 means "single VLAN").
cevcSICEVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5), )
if mibBuilder.loadTexts: cevcSICEVlanTable.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanTable.setDescription('This table contains the CE-VLAN map list for each Service Instance. This table has an expansion dependent relationship on the cevcSITable, containing a row for each CE-VLAN or a range of CE-VLANs that are mapped to a service instance.')
cevcSICEVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSICEVlanBeginningVlan"))
if mibBuilder.loadTexts: cevcSICEVlanEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanEntry.setDescription("This entry contains the CE-VLANs that are mapped at a Service Instance. Entries in this table may be created and deleted via the cevcSICEVlanRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcSICEVlanRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcSICEVlanRowStatus column to 'destroy'.")
cevcSICEVlanBeginningVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1, 1), VlanId())
if mibBuilder.loadTexts: cevcSICEVlanBeginningVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanBeginningVlan.setDescription("If cevcSICEVlanEndingVlan is '0', then this object indicates a single VLAN in the list. If cevcSICEVlanEndingVlan is not '0', then this object indicates the first VLAN in a range of VLANs.")
cevcSICEVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSICEVlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSICEVlanTable. This object cannot be set to 'active' until all objects have been assigned valid values. Writable objects in this table can be modified while the value of the cevcSICEVlanRowStatus column is 'active'.")
cevcSICEVlanStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSICEVlanStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSICEVlanEndingVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 5, 1, 4), VlanIdOrNone()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSICEVlanEndingVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSICEVlanEndingVlan.setDescription("This object indicates the last VLAN in a range of VLANs. If the row does not describe a range, then the value of this column must be '0'.")
# --- cevcSIMatchCriteriaTable (1.3.6.1.4.1.9.9.613.1.4.6) ---
# Match-criteria groups per service instance; rows with the same
# cevcSIIndex but different cevcSIMatchCriteriaIndex are OR-ed together.
# NOTE: the cevcSIMatchCriteriaType DESCRIPTION below was split across two
# physical lines (an unterminated string literal — a SyntaxError); it has
# been rejoined into a single valid statement with the text unchanged.
cevcSIMatchCriteriaTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6), )
if mibBuilder.loadTexts: cevcSIMatchCriteriaTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaTable.setDescription('This table contains the match criteria for each Service Instance. This table has an expansion dependent relationship on the cevcSITable, containing a row for each group of match criteria of each service instance.')
cevcSIMatchCriteriaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIMatchCriteriaIndex"))
if mibBuilder.loadTexts: cevcSIMatchCriteriaEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaEntry.setDescription("This entry represents a group of match criteria for a service instance. Each entry in the table with the same cevcSIIndex and different cevcSIMatchCriteriaIndex represents an OR operation of the match criteria for the service instance. Entries in this table may be created and deleted via the cevcSIMatchRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcSIMatchRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcSIMatchRowStatus column to 'destroy'.")
# Index column: arbitrary positive integer identifying one criteria group.
cevcSIMatchCriteriaIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cevcSIMatchCriteriaIndex.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaIndex.setDescription('This object indicates an arbitrary integer-value that uniquely identifies a match criteria for a service instance.')
cevcSIMatchRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIMatchCriteriaTable. If the value of cevcSIMatchCriteriaType column is 'dot1q(1)' or 'dot1ad(2)' or 'untaggedAndDot1q' or 'untaggedAndDot1ad, then cevcSIMatchCriteriaRowStatus can not be set to 'active' until there exist an active row in the cevcSIMatchEncapTable with the same index value for cevcSIIndex and cevcSIMatchCriteriaIndex. Writable objects in this table can be modified while the value of the cevcSIMatchRowStatus column is 'active'.")
cevcSIMatchStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSIMatchCriteriaType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 6, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("unknown", 1), ("dot1q", 2), ("dot1ad", 3), ("untagged", 4), ("untaggedAndDot1q", 5), ("untaggedAndDot1ad", 6), ("priorityTagged", 7), ("defaultTagged", 8)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchCriteriaType.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaType.setDescription("This object specifies the criteria used to match a service instance. 'unknown' Match criteria for the service instance is not defined or unknown. 'dot1q' The IEEE 802.1q encapsulation is used as a match criteria for the service instance. The ether type value of the IEEE 802.1q tag is specified by the object cevcSIEncapEncapsulation with the same index value of cevcSIIndex and cevcSIMatchCreriaIndex. 'dot1ad' The IEEE 802.1ad encapsulation is used as a match criteria for the service instance. The ether type value of the IEEE 802.1ad tag is specified by the cevcSIEncapEncapsulation column with the same index value of cevcSIIndex and cevcSIMatchCreriaIndex. 'untagged' Service instance processes untagged service frames. Only one service instance on the interface/media type can use untagged frames as a match criteria. 'untaggedAndDot1q' Both untagged frames and the IEEE 802.1q encapsulation are used as a match criteria for the service instance. Only one service instance on the interface/media type can use untagged frames as a match criteria. The ether type value of the IEEE 802.1q tag is specified by the cevcSIEncapEncapsulation column with the same index value of cevcSIIndex and cevcSIMatchCreriaIndex. 'untaggedAndDot1ad' Both untagged frames and the IEEE 802.1ad encapsulation are used as a match criteria for the service instance. Only one service instance on the interface/media type can use untagged frames as a match criteria. The ether type value of the IEEE 802.1ad tag is specified by the cevcSIEncapEncapsulation column with the same index value of cevcSIIndex and cevcSIMatchCreriaIndex. 'priorityTagged' Service instance processes priority tagged frames. Only one service instance on the interface/media type can use priority tagged frames as a match criteria. 'defaultTagged' Service instance is a default service instance. The default service instance processes frames with VLANs that do not match to any other service instances configured on the interface/media type. Only one service instance on the interface/media type can be the default service instance.")
# --- cevcSIMatchEncapTable (1.3.6.1.4.1.9.9.613.1.4.7) ---
# Encapsulation-based match criteria; sparse-augments
# cevcSIMatchCriteriaTable for the encapsulation-typed criteria rows.
cevcSIMatchEncapTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7), )
if mibBuilder.loadTexts: cevcSIMatchEncapTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapTable.setDescription("This table contains the encapsulation based match criteria for each service instance. This table has a sparse dependent relationship on the cevcSIMatchCriteriaTable, containing a row for each match criteria having one of the following values for cevcSIMatchCriteriaType: - 'dot1q' - 'dot1ad' - 'untaggedAndDot1q' - 'untaggedAndDot1ad' - 'priorityTagged'")
cevcSIMatchEncapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIMatchCriteriaIndex"))
if mibBuilder.loadTexts: cevcSIMatchEncapEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapEntry.setDescription("This entry represents a group of encapulation match criteria for a service instance. Entries in this table may be created and deleted via the cevcSIMatchEncapRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of cevcSIMatchEncapRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of cevcSIMatchEncapRowStatus column to 'destroy'.")
cevcSIMatchEncapRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIMatchEncapTable. This object cannot be set to 'active' until cevcSIEncapEncapsulation and objects referred by cevcSIMatchEncapValid have been assigned their respective valid values. Writable objects in this table can be modified while the value of the cevcSIEncapMatchRowStatus column is 'active'.")
cevcSIMatchEncapStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 2), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
# BITS construct: each named bit enables one match-criteria aspect below.
cevcSIMatchEncapValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 3), Bits().clone(namedValues=NamedValues(("primaryCos", 0), ("secondaryCos", 1), ("payloadType", 2), ("payloadTypes", 3), ("priorityCos", 4), ("dot1qNativeVlan", 5), ("dot1adNativeVlan", 6), ("encapExact", 7)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapValid.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapValid.setDescription("This object specifies the encapsulation criteria used to match a service instance. 'primaryCos' The 'primaryCos' bit set to '1' specifies the Class of Service is used as service match criteria for the service instance. When this bit is set to '1' there must exist aleast one active rows in the cevcSIPrimaryVlanTable which has the same index values of cevcSIIndex and cevcSIMatchCriteriaIndex. When 'primaryCos' bit is '1', the cevcSIPrimaryCos column indicates the CoS value(s). 'secondaryCos' The 'secondaryCos' bit set to '1' specifies the Class of Service is used as service match criteria for the service instance. When this bit is set to '1' there must exist aleast one active rows in the cevcSISecondaryVlanTable which has the same index values of cevcSIIndex and cevcSIMatchCriteriaIndex. When 'secondaryCos' bit is '1', the cevcSISecondaryCos column indicates the CoS value(s). 'payloadType' This bit set to '1' specifies that the value of corresponding instance of cevcSIMatchEncapPayloadType is used as service match criteria for the service instance. 'payloadTypes' This bit set to '1' specifies that the value of corresponding instance of cevcSIMatchEncapPayloadTypes is used as service match criteria for the service instance. 'priorityCos' This bit set to '1' specifies that the value of corresponding instance of cevcSIMatchEncapPriorityCos is used as service match criteria for the service instance. 'dot1qNativeVlan' This bit set to '1' specifies that the IEEE 802.1q tag with native vlan is used as service match criteria for the service instance. 'dot1adNativeVlan' This bit set to '1' specifies that the IEEE 802.1ad tag with native vlan is used as service match criteria for the service instance. 'encapExact' This bit set to '1' specifies that a service frame is mapped to the service instance only if it matches exactly to the encapsulation specified by the service instance.")
cevcSIMatchEncapEncapsulation = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("dot1qEthertype0x8100", 1), ("dot1qEthertype0x9100", 2), ("dot1qEthertype0x9200", 3), ("dot1qEthertype0x88A8", 4), ("dot1adEthertype0x88A8", 5), ("dot1ahEthertype0x88A8", 6)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapEncapsulation.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapEncapsulation.setDescription("This object specifies the encapsulation type used as service match criteria. The object also specifies the Ethertype for egress packets on the service instance. 'dot1qEthertype0x8100' The IEEE 801.1q encapsulation with ether type value 0x8100. 'dot1qEthertype0x9100' The IEEE 801.1q encapsulation with ether type value 0x9100. 'dot1qEthertype0x9200' The IEEE 801.1q encapsulation with ether type value 0x9200. 'dot1qEthertype0x88A8' The IEEE 801.1q encapsulation with ether type value 0x88A8. 'dot1adEthertype0x88A8' The IEEE 801.1ad encapsulation with ether type value 0x88A8. 'dot1ahEthertype0x88A8' The IEEE 801.1ah encapsulation with ether type value 0x88A8.")
cevcSIMatchEncapPrimaryCos = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 5), CiscoCosList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapPrimaryCos.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapPrimaryCos.setDescription("This object specifies the CoS values which the Service Instance uses as service match criteria. This object is valid only when 'primaryVlans' and 'primaryCos' bits are set to '1' in corresponding instance of the object cevcSIMatchEncapValid.")
cevcSIMatchEncapSecondaryCos = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 6), CiscoCosList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapSecondaryCos.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapSecondaryCos.setDescription("This object specifies the CoS values which the Service Instance uses as service match criteria. This object is valid only when 'secondaryVlans' and 'secondaryCos' bits are set to '1' in corresponding instance of the object cevcSIMatchEncapValid.")
# Deprecated in the MIB (superseded by cevcSIMatchEncapPayloadTypes below);
# kept because generated code mirrors the MIB module verbatim.
cevcSIMatchEncapPayloadType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("payloadType0x0800ip", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapPayloadType.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIMatchEncapPayloadType.setDescription("This object specifies the PayloadType(etype/protocol type) values that the service instance uses as a service match criteria. This object is required when the forwarding of layer-2 ethernet packet is done through the payloadType i.e IP etc. 'other' None of the following. 'payloadType0x0800ip' Payload type value for IP is 0x0800. This object is valid only when 'payloadType' bit is set to '1' in corresponding instance of the object cevcSIMatchEncapValid. This object is deprecated by cevcSIMatchEncapPayloadTypes.")
cevcSIMatchEncapPayloadTypes = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 8), Bits().clone(namedValues=NamedValues(("payloadTypeIPv4", 0), ("payloadTypeIPv6", 1), ("payloadTypePPPoEDiscovery", 2), ("payloadTypePPPoESession", 3), ("payloadTypePPPoEAll", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapPayloadTypes.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapPayloadTypes.setDescription("This object specifies the etype/protocol type values that service instance uses as a service match criteria. This object is required when the forwarding of layer-2 ethernet packet is done through the payload ether type i.e IP etc. 'payloadTypeIPv4' Ethernet payload type value for IPv4 protocol. 'payloadTypeIPv6' Ethernet payload type value for IPv6 protocol. 'payloadTypePPPoEDiscovery' Ethernet payload type value for PPPoE discovery stage. 'payloadTypePPPoESession' Ethernet payload type value for PPPoE session stage. 'payloadTypePPPoEAll' All ethernet payload type values for PPPoE protocol. This object is valid only when 'payloadTypes' bit is set to '1' in corresponding instance of the object cevcSIMatchEncapValid. This object deprecates cevcSIMatchEncapPayloadType.")
cevcSIMatchEncapPriorityCos = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 7, 1, 9), CiscoCosList()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIMatchEncapPriorityCos.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchEncapPriorityCos.setDescription("This object specifies the priority CoS values which the service instance uses as service match criteria. This object is valid only when 'priorityCos' bit is set to '1' in corresponding instance of the object cevcSIMatchEncapValid.")
# --- cevcSIPrimaryVlanTable (OID 1.3.6.1.4.1.9.9.613.1.4.8) ---
# Primary VLAN list per service instance: each row is a single VLAN or a
# VLAN range (beginning VLAN is an index column; ending VLAN of 0 means
# "single VLAN").  Rows are managed via the RowStatus column.  Generated
# code — OID tuples and description strings mirror the MIB verbatim.
cevcSIPrimaryVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8), )
if mibBuilder.loadTexts: cevcSIPrimaryVlanTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanTable.setDescription('This table contains the primary VLAN ID list for each Service Instance. This table has an expansion dependent relationship on the cevcSIMatchEncapTable, containing zero or more rows for each encapsulation match criteria.')
# Row indexed by (cevcSIIndex, cevcSIMatchCriteriaIndex, beginning VLAN).
cevcSIPrimaryVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIMatchCriteriaIndex"), (0, "CISCO-EVC-MIB", "cevcSIPrimaryVlanBeginningVlan"))
if mibBuilder.loadTexts: cevcSIPrimaryVlanEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanEntry.setDescription("This entry specifies a single VLAN or a range of VLANs contained in the primary VLAN list that's part of the encapsulation match criteria. Entries in this table may be created and deleted via the cevcSIPrimaryVlanRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of the cevcSIPrimaryVlanRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of the cevcSIPrimaryVlanRowStatus column to 'destroy'.")
# Column .1 is an index (no MaxAccess), hence no .setMaxAccess() call here.
cevcSIPrimaryVlanBeginningVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1, 1), VlanId())
if mibBuilder.loadTexts: cevcSIPrimaryVlanBeginningVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanBeginningVlan.setDescription("If cevcSIPrimaryVlanEndingVlan is '0', then this object indicates a single VLAN in the list. If cevcSIPrimaryVlanEndingVlan is not '0', then this object indicates the first VLAN in a range of VLANs.")
cevcSIPrimaryVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIPrimaryVlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIPrimaryVlanTable. This column cannot be set to 'active' until all objects have been assigned valid values. Writable objects in this table can be modified while the value of the cevcSIPrimaryVlanRowStatus column is 'active'.")
# Default storage type is 'volatile' (set via .clone()).
cevcSIPrimaryVlanStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIPrimaryVlanStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSIPrimaryVlanEndingVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 8, 1, 4), VlanIdOrNone()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIPrimaryVlanEndingVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSIPrimaryVlanEndingVlan.setDescription("This object indicates the last VLAN in a range of VLANs. If the row does not describe a range, then the value of this column must be '0'.")
# --- cevcSISecondaryVlanTable (OID 1.3.6.1.4.1.9.9.613.1.4.9) ---
# Secondary (inner) VLAN list per service instance; structurally parallel to
# cevcSIPrimaryVlanTable above.  Generated code — the OID tuples and the
# description strings (including the MIB's own "seconadary" typo) are
# mirrored verbatim from CISCO-EVC-MIB and must not be hand-edited.
cevcSISecondaryVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9), )
if mibBuilder.loadTexts: cevcSISecondaryVlanTable.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanTable.setDescription('This table contains the seconadary VLAN ID list for each service instance. This table has an expansion dependent relationship on the cevcSIMatchEncapTable, containing zero or more rows for each encapsulation match criteria.')
# Row indexed by (cevcSIIndex, cevcSIMatchCriteriaIndex, beginning VLAN).
cevcSISecondaryVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"), (0, "CISCO-EVC-MIB", "cevcSIMatchCriteriaIndex"), (0, "CISCO-EVC-MIB", "cevcSISecondaryVlanBeginningVlan"))
if mibBuilder.loadTexts: cevcSISecondaryVlanEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanEntry.setDescription("This entry specifies a single VLAN or a range of VLANs contained in the secondary VLAN list that's part of the encapsulation match criteria. Entries in this table may be created and deleted via the cevcSISecondaryVlanRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of the cevcSISecondaryVlanRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of the cevcSISecondaryVlanRowStatus column to 'destroy'.")
# Column .1 is an index (no MaxAccess), hence no .setMaxAccess() call here.
cevcSISecondaryVlanBeginningVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1, 1), VlanId())
if mibBuilder.loadTexts: cevcSISecondaryVlanBeginningVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanBeginningVlan.setDescription("If cevcSISecondaryVlanEndingVlan is '0', then this object indicates a single VLAN in the list. If cevcSISecondaryVlanEndingVlan is not '0', then this object indicates the first VLAN in a range of VLANs.")
cevcSISecondaryVlanRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSISecondaryVlanRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSISecondaryVlanTable. This column can not be set to 'active' until all objects have been assigned valid values. Writable objects in this table can be modified while the value of cevcSISecondaryVlanRowStatus column is 'active'.")
# Default storage type is 'volatile' (set via .clone()).
cevcSISecondaryVlanStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSISecondaryVlanStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
cevcSISecondaryVlanEndingVlan = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 9, 1, 4), VlanIdOrNone()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSISecondaryVlanEndingVlan.setStatus('current')
if mibBuilder.loadTexts: cevcSISecondaryVlanEndingVlan.setDescription("This object indicates the last VLAN in a range of VLANs. If the row does not describe a range, then the value of this column must be '0'.")
# --- cevcSIForwardBdTable (OID 1.3.6.1.4.1.9.9.613.1.4.10) ---
# Forwarding bridge-domain information per service instance.  The single
# scalar BD number (column .3) is deprecated; the replacement encoding is a
# base value (column .4: 0/4096/8192/12288/16384) plus four 128-octet
# bitmaps (columns .5-.8), each covering 1024 bridge-domain IDs, so the
# effective BD number is base + bit position.  Generated code — OID tuples
# and description strings mirror the MIB verbatim.
cevcSIForwardBdTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10), )
if mibBuilder.loadTexts: cevcSIForwardBdTable.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdTable.setDescription("This table contains the forwarding bridge domain information for each service instance. This table has a sparse dependent relationship on the cevcSITable, containing a row for each service instance having a cevcSIForwardingType of 'bridgeDomain'.")
# Row indexed solely by the service-instance index.
cevcSIForwardBdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1), ).setIndexNames((0, "CISCO-EVC-MIB", "cevcSIIndex"))
if mibBuilder.loadTexts: cevcSIForwardBdEntry.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdEntry.setDescription("This entry represents an bridged domain used to forward service frames by the service instance. Entries in this table may be created and deleted via the cevcSIForwardBdRowStatus object or the management console on the system. Using SNMP, rows are created by a SET request setting the value of the cevcSIForwardBdRowStatus column to 'createAndGo' or 'createAndWait'. Rows are deleted by a SET request setting the value of the cevcSIForwardBdRowStatus column to 'destroy'.")
cevcSIForwardBdRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdRowStatus.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdRowStatus.setDescription("This object enables a SNMP peer to create, modify, and delete rows in the cevcSIForwardBdTable. This column can not be set to 'active' until all objects have been assigned valid values. Writable objects in this table can be modified while the value of the cevcSIForwardBdRowStatus column is 'active'.")
# Default storage type is 'volatile' (set via .clone()).
cevcSIForwardBdStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 2), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdStorageType.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdStorageType.setDescription("This object specifies how the SNMP entity stores the data contained by the corresponding conceptual row. This object can be set to either 'volatile' or 'nonVolatile'. Other values are not applicable for this conceptual row and are not supported.")
# Column .3: deprecated scalar BD identifier (superseded by base + bitmaps).
cevcSIForwardBdNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 3), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIForwardBdNumber.setDescription('The bridge domain identifier that is associated with the service instance. A bridge domain refers to a layer 2 broadcast domain spanning a set of physical or virtual ports. Frames are switched Multicast and unknown destination unicast frames are flooded within the confines of the bridge domain.')
cevcSIForwardBdNumberBase = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 4096, 8192, 12288, 16384))).clone(namedValues=NamedValues(("bdNumBase0", 0), ("bdNumBase4096", 4096), ("bdNumBase8192", 8192), ("bdNumBase12288", 12288), ("bdNumBase16384", 16384)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumberBase.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumberBase.setDescription('This object specifies the base of bridge domain. The bridge domain range is 1~16k, cevcSIForwardBdNumberBase is to track what is the base of each 4k bitmap. In this way we can specify all the 16k bridge domains using four 1k bitmaps and having the base which describes that is the base of each 4k bitmap. The four 1k bitmaps, cevcSIForwardBdNumber1kBitmap represents 0~1023, cevcSIForwardBdNumber2kBitmap represents 1024~2047, cevcSIForwardBdNumber3kBitmap represents 2048~3071, cevcSIForwardBdNumber4kBitmap represents 3072~4095 And cevcSIForwardBdNumberBase is one of 0, 4096, 8192, 12288, 16384. SNMP Administrator can use cevcSIForwardBdNumberBase + (position of the set bit in four 1k bitmaps) to get BD number of a service instance.')
# Columns .5-.8: four 128-octet (1024-bit) bitmaps covering BD IDs
# 0-1023, 1024-2047, 2048-3071 and 3072-4095 respectively.
cevcSIForwardBdNumber1kBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber1kBitmap.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumber1kBitmap.setDescription("This object specifies a string of octets containing one bit per Bridge domain per service instance(generally we have one bridge domain per nontrunk service instance but can have more than one bridge configured with a trunk service instance). The first octet corresponds to Bridge domains with Bridge domain ID values of 0 through 7; the second octet to Bridge domains 8 through 15; etc. Thus, this 128-octet bitmap represents bridge domain ID value 0~1023. For each Bridge domain configured, the bit corresponding to that bridge domain is set to '1'. SNMP Administrator uses cevcSIForwardBdNumberBase + (position of the set bit in bitmap)to calculate BD number of a service instance. Note that if the length of this string is less than 128 octets, any 'missing' octets are assumed to contain the value zero. An NMS may omit any zero-valued octets from the end of this string in order to reduce SetPDU size, and the agent may also omit zero-valued trailing octets, to reduce the size of GetResponse PDUs.")
cevcSIForwardBdNumber2kBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber2kBitmap.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumber2kBitmap.setDescription("This object specifies a string of octets containing one bit per Bridge domain per service instance(generally we have one bridge domain per nontrunk service instance but can have more than one bridge configured with a trunk service instance). The first octet corresponds to Bridge domains with Bridge domain ID values of 1024 through 1031; the second octet to Bridge domains 1032 through 1039; etc. Thus, this 128-octet bitmap represents bridge domain ID value 1024~2047. For each Bridge domain configured, the bit corresponding to that bridge domain is set to 1. SNMP Administrator uses cevcSIForwardBdNumberBase + (position of the set bit in bitmap)to calculate BD number of a service instance. Note that if the length of this string is less than 128 octets, any 'missing' octets are assumed to contain the value zero. An NMS may omit any zero-valued octets from the end of this string in order to reduce SetPDU size, and the agent may also omit zero-valued trailing octets, to reduce the size of GetResponse PDUs.")
cevcSIForwardBdNumber3kBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 7), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber3kBitmap.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumber3kBitmap.setDescription("This object specifies a string of octets containing one bit per Bridge domain per service instance(generally we have one bridge domain per non-trunk service instance but can have more than one bridge configured with a trunk service instance). The first octet corresponds to Bridge domains with Bridgedomain ID values of 2048 through 2055; the second octet to Bridge domains 2056 through 2063; etc. Thus, this 128-octet bitmap represents bridge domain ID value 2048~3071. For each Bridge domain configured, the bit corresponding to that bridge domain is set to 1. SNMP Administrator uses cevcSIForwardBdNumberBase + (position of the set bit in bitmap)to calculate BD number of a service instance. Note that if the length of this string is less than 128 octets, any 'missing' octets are assumed to contain the value zero. An NMS may omit any zero-valued octets from the end of this string in order to reduce SetPDU size, and the agent may also omit zero-valued trailing octets, to reduce the size of GetResponse PDUs.")
cevcSIForwardBdNumber4kBitmap = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 4, 10, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cevcSIForwardBdNumber4kBitmap.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardBdNumber4kBitmap.setDescription("This object specifies a string of octets containing one bit per Bridge domain per service instance(generally we have one bridge domain per non-trunk service instance but can have more than one bridge configured with a trunk service instance). The first octet corresponds to Bridge domains with Bridgedomain ID values of 3078 through 3085; the second octet to Bridge domains 3086 through 3093; etc. Thus, this 128-octet bitmap represents bridge domain ID value 3072~4095. For each Bridge domain configured, the bit corresponding to that bridge domain is set to 1. SNMP Administrator uses cevcSIForwardBdNumberBase + (position of the set bit in bitmap)to calculate BD number of a service instance. Note that if the length of this string is less than 128 octets, any 'missing' octets are assumed to contain the value zero. An NMS may omit any zero-valued octets from the end of this string in order to reduce SetPDU size, and the agent may also omit zero-valued trailing octets, to reduce the size of GetResponse PDUs.")
# --- Notification configuration and notification types ---
# cevcEvcNotifyEnabled (1.3.6.1.4.1.9.9.613.1.5.1) is a read-write Bits
# scalar gating which of the four notifications below the system emits.
# The NotificationType definitions live under the ciscoEvcNotificationPrefix
# node (1.3.6.1.4.1.9.9.613.0.0).  Generated code — OID tuples and
# description strings mirror the MIB verbatim.
cevcEvcNotifyEnabled = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 613, 1, 5, 1), Bits().clone(namedValues=NamedValues(("status", 0), ("creation", 1), ("deletion", 2), ("macSecurityViolation", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cevcEvcNotifyEnabled.setStatus('current')
if mibBuilder.loadTexts: cevcEvcNotifyEnabled.setDescription("This object specifies the system generation of notification, including: 'status' This bit set to '1' specifies the system generation of cevcEvcStatusChangedNotification. 'creation' This bit set to '1' specifies the system generation of cevcEvcCreationNotification. 'deletion' This bit set to '1' specifices the system generation of cevcEvcDeletionNotification. 'macSecurityViolation' This bit set to '1' specifies the system generation of cevcMacSecurityViolation.")
ciscoEvcNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0))
# Trap 1: EVC operational-status / active-UNI-count change.
cevcEvcStatusChangedNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0, 1)).setObjects(("CISCO-EVC-MIB", "cevcEvcOperStatus"), ("CISCO-EVC-MIB", "cevcEvcCfgUnis"), ("CISCO-EVC-MIB", "cevcEvcActiveUnis"))
if mibBuilder.loadTexts: cevcEvcStatusChangedNotification.setStatus('current')
if mibBuilder.loadTexts: cevcEvcStatusChangedNotification.setDescription("A device generates this notification when an EVC's operational status changes, or the number of active UNIs associated with the EVC (cevcNumActiveUnis) changes.")
# Trap 2: EVC created.
cevcEvcCreationNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0, 2)).setObjects(("CISCO-EVC-MIB", "cevcEvcOperStatus"))
if mibBuilder.loadTexts: cevcEvcCreationNotification.setStatus('current')
if mibBuilder.loadTexts: cevcEvcCreationNotification.setDescription('A device generates this notification upon the creation of an EVC.')
# Trap 3: EVC deleted.
cevcEvcDeletionNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0, 3)).setObjects(("CISCO-EVC-MIB", "cevcEvcOperStatus"))
if mibBuilder.loadTexts: cevcEvcDeletionNotification.setStatus('current')
if mibBuilder.loadTexts: cevcEvcDeletionNotification.setDescription('A device generates this notification upon the deletion of an EVC.')
# Trap 4: MAC-security violation; carries interface, BD bitmaps, SI id,
# offending MAC, configured limit and violation cause as varbinds.
cevcMacSecurityViolationNotification = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 613, 0, 0, 4)).setObjects(("IF-MIB", "ifIndex"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumberBase"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber1kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber2kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber3kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber4kBitmap"), ("CISCO-EVC-MIB", "cevcSIID"), ("CISCO-EVC-MIB", "cevcMacAddress"), ("CISCO-EVC-MIB", "cevcMaxMacConfigLimit"), ("CISCO-EVC-MIB", "cevcViolationCause"))
if mibBuilder.loadTexts: cevcMacSecurityViolationNotification.setStatus('current')
# NOTE: the description below is a single string literal spanning two
# physical lines (it contains an embedded newline); kept byte-identical.
if mibBuilder.loadTexts: cevcMacSecurityViolationNotification.setDescription("A SNMP entity generates this notification in the following cases: When the system MAC Address limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedSystemLimit' value. When the Bridge domain limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedBdLimit' value. When the Service Instance limit is exceeded, the cevcMacSecurityViolationCauseType will contain 'exceedSILimit' value. If the MAC address is present in the Black list then cevcMacSecurityViolationCauseType will contain 'blackListDeny' value. Description of all the varbinds for this Notification is as follows: ifIndex indicates the interface index which identifies the interface that the service instance is attached. cevcSIForwardBdNumberBase indicates the base of bridge domain. The bridge domain range is 1~16k, this object is to track the base of each 4k bitmap. cevcSIForwardBdNumber1kBitmap indicates a string of octets containing one bit per Bridge domain per service instance. This 128-octet bitmap represents bridge domain ID values 0~1023. cevcSIForwardBdNumber2kBitmap indicates a string of octets containing one bit per Bridge domain per service instance. This 128-octet bitmap represents bridge domain ID values 1024~2047. cevcSIForwardBdNumber3kBitmap indicates a string of octets containing one bit per Bridge domain per service instance. This 128-octet bitmap represents bridge domain ID values 2048~3071. cevcSIForwardBdNumber4kBitmap indicates a string of octets containing one bit per Bridge domain per service instance. This 128-octet bitmap represents bridge domain ID values 3072~4095. cevcSIID indicates the service instance ID for the Mac security violation notification. cevcMacAddress indicates the Mac address which has violated the Mac security rules. cevcMaxMacConfigLimit indicates the maximum Mac configuration limit. 
This is also sent as a part of Mac security violation notification. cevcViolationCause indicates the Mac security violation cause.")
# --- Conformance: compliance statements and object/notification groups ---
# MODULE-COMPLIANCE and OBJECT-GROUP / NOTIFICATION-GROUP definitions under
# 1.3.6.1.4.1.9.9.613.2.  Each `setStatus` call is wrapped in a pysnmp
# version guard (`> (4, 4, 0)`) — presumably because setStatus on these
# conformance classes only exists in newer pysnmp releases; confirm against
# the pysnmp changelog before relying on it.  Generated code — OID tuples
# and description strings mirror the MIB verbatim.
ciscoEvcMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 1))
ciscoEvcMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2))
# Original compliance (deprecated, superseded by Rev1 then Rev2).
ciscoEvcMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 1, 1)).setObjects(("CISCO-EVC-MIB", "cevcSystemGroup"), ("CISCO-EVC-MIB", "cevcPortGroup"), ("CISCO-EVC-MIB", "cevcEvcGroup"), ("CISCO-EVC-MIB", "cevcSIGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationConfigGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationGroup"), ("CISCO-EVC-MIB", "cevcSICosMatchCriteriaGroup"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteGroup"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaGroup"), ("CISCO-EVC-MIB", "cevcSIForwardGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoEvcMIBCompliance = ciscoEvcMIBCompliance.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoEvcMIBCompliance.setDescription('The new compliance statement for entities which implement the CISCO-EVC-MIB.')
ciscoEvcMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 1, 2)).setObjects(("CISCO-EVC-MIB", "cevcSystemGroup"), ("CISCO-EVC-MIB", "cevcPortGroup"), ("CISCO-EVC-MIB", "cevcEvcGroup"), ("CISCO-EVC-MIB", "cevcSIGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationConfigGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationGroup"), ("CISCO-EVC-MIB", "cevcSICosMatchCriteriaGroup"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteGroup"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaGroupRev1"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaGroupRev1"), ("CISCO-EVC-MIB", "cevcSIForwardGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoEvcMIBComplianceRev1 = ciscoEvcMIBComplianceRev1.setStatus('deprecated')
if mibBuilder.loadTexts: ciscoEvcMIBComplianceRev1.setDescription('The compliance statement for entities which implement the CISCO-EVC-MIB. This compliance module deprecates ciscoEvcMIBCompliance.')
# Current compliance statement.
ciscoEvcMIBComplianceRev2 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 1, 3)).setObjects(("CISCO-EVC-MIB", "cevcSystemGroup"), ("CISCO-EVC-MIB", "cevcPortGroup"), ("CISCO-EVC-MIB", "cevcEvcGroup"), ("CISCO-EVC-MIB", "cevcSIGroupRev1"), ("CISCO-EVC-MIB", "cevcEvcNotificationConfigGroup"), ("CISCO-EVC-MIB", "cevcEvcNotificationGroupRev1"), ("CISCO-EVC-MIB", "cevcSICosMatchCriteriaGroup"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteGroup"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaGroupRev1"), ("CISCO-EVC-MIB", "cevcSIForwardGroupRev1"), ("CISCO-EVC-MIB", "cevcMacSecurityViolationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoEvcMIBComplianceRev2 = ciscoEvcMIBComplianceRev2.setStatus('current')
if mibBuilder.loadTexts: ciscoEvcMIBComplianceRev2.setDescription('The compliance statement for entities which implement the CISCO-EVC-MIB. This compliance module deprecates ciscoEvcMIBComplianceRev1.')
cevcSystemGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 1)).setObjects(("CISCO-EVC-MIB", "cevcMaxNumEvcs"), ("CISCO-EVC-MIB", "cevcNumCfgEvcs"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcSystemGroup = cevcSystemGroup.setStatus('current')
if mibBuilder.loadTexts: cevcSystemGroup.setDescription('A collection of objects providing system configuration of EVCs.')
cevcPortGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 2)).setObjects(("CISCO-EVC-MIB", "cevcPortMode"), ("CISCO-EVC-MIB", "cevcPortMaxNumEVCs"), ("CISCO-EVC-MIB", "cevcPortMaxNumServiceInstances"), ("CISCO-EVC-MIB", "cevcPortL2ControlProtocolAction"), ("CISCO-EVC-MIB", "cevcUniIdentifier"), ("CISCO-EVC-MIB", "cevcUniPortType"), ("CISCO-EVC-MIB", "cevcUniServiceAttributes"), ("CISCO-EVC-MIB", "cevcUniCEVlanEvcEndingVlan"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcPortGroup = cevcPortGroup.setStatus('current')
if mibBuilder.loadTexts: cevcPortGroup.setDescription('A collection of objects providing configuration for ports in an EVC.')
cevcEvcGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 3)).setObjects(("CISCO-EVC-MIB", "cevcEvcIdentifier"), ("CISCO-EVC-MIB", "cevcEvcType"), ("CISCO-EVC-MIB", "cevcEvcOperStatus"), ("CISCO-EVC-MIB", "cevcEvcCfgUnis"), ("CISCO-EVC-MIB", "cevcEvcActiveUnis"), ("CISCO-EVC-MIB", "cevcEvcStorageType"), ("CISCO-EVC-MIB", "cevcEvcRowStatus"), ("CISCO-EVC-MIB", "cevcEvcUniId"), ("CISCO-EVC-MIB", "cevcEvcUniOperStatus"), ("CISCO-EVC-MIB", "cevcEvcLocalUniIfIndex"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcEvcGroup = cevcEvcGroup.setStatus('current')
if mibBuilder.loadTexts: cevcEvcGroup.setDescription('A collection of objects providing configuration and status information for EVCs.')
# Deprecated SI group (superseded by cevcSIGroupRev1, defined elsewhere).
cevcSIGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 4)).setObjects(("CISCO-EVC-MIB", "cevcSIName"), ("CISCO-EVC-MIB", "cevcSITargetType"), ("CISCO-EVC-MIB", "cevcSITarget"), ("CISCO-EVC-MIB", "cevcSIEvcIndex"), ("CISCO-EVC-MIB", "cevcSIRowStatus"), ("CISCO-EVC-MIB", "cevcSIStorageType"), ("CISCO-EVC-MIB", "cevcSIAdminStatus"), ("CISCO-EVC-MIB", "cevcSIOperStatus"), ("CISCO-EVC-MIB", "cevcSIL2ControlProtocolAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteEncapsulation"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan1"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan2"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteSymmetric"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteStorageType"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteRowStatus"), ("CISCO-EVC-MIB", "cevcSIForwardingType"), ("CISCO-EVC-MIB", "cevcSICEVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSICEVlanStorageType"), ("CISCO-EVC-MIB", "cevcSICEVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSIMatchStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchEncapStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapValid"), ("CISCO-EVC-MIB", "cevcSIMatchEncapEncapsulation"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanEndingVlan"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcSIGroup = cevcSIGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIGroup.setDescription('A collection of objects providing configuration and match criteria for service instances. cevcSIGroup object is superseded by cevcSIGroupRev1.')
cevcSIVlanRewriteGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 5)).setObjects(("CISCO-EVC-MIB", "cevcSIVlanRewriteAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteEncapsulation"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan1"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan2"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteSymmetric"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteStorageType"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteRowStatus"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcSIVlanRewriteGroup = cevcSIVlanRewriteGroup.setStatus('current')
if mibBuilder.loadTexts: cevcSIVlanRewriteGroup.setDescription('A collection of objects which provides VLAN rewrite information for a service instance.')
cevcSICosMatchCriteriaGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 6)).setObjects(("CISCO-EVC-MIB", "cevcSIMatchEncapPrimaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapSecondaryCos"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcSICosMatchCriteriaGroup = cevcSICosMatchCriteriaGroup.setStatus('current')
if mibBuilder.loadTexts: cevcSICosMatchCriteriaGroup.setDescription('A collection of objects which provides CoS match criteria for a service instance.')
# Deprecated match-criteria group (superseded by Rev1 below).
cevcSIMatchCriteriaGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 7)).setObjects(("CISCO-EVC-MIB", "cevcSIMatchRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchEncapStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapValid"), ("CISCO-EVC-MIB", "cevcSIMatchEncapEncapsulation"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPrimaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapSecondaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPayloadType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanEndingVlan"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcSIMatchCriteriaGroup = cevcSIMatchCriteriaGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIMatchCriteriaGroup.setDescription('A collection of objects providing match criteria information for service instances. cevcSIMatchCriteriaGroup object is superseded by cevcSIMatchCriteriaGroupRev1.')
# Deprecated forwarding group (superseded by cevcSIForwardGroupRev1,
# defined elsewhere).
cevcSIForwardGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 8)).setObjects(("CISCO-EVC-MIB", "cevcSIForwardingType"), ("CISCO-EVC-MIB", "cevcSIForwardBdRowStatus"), ("CISCO-EVC-MIB", "cevcSIForwardBdStorageType"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcSIForwardGroup = cevcSIForwardGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cevcSIForwardGroup.setDescription('A collection of objects providing service frame forwarding information for service instances. cevcSIForwardGroup object is superseded by cevcSIForwardGroupRev1.')
cevcEvcNotificationConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 9)).setObjects(("CISCO-EVC-MIB", "cevcEvcNotifyEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcEvcNotificationConfigGroup = cevcEvcNotificationConfigGroup.setStatus('current')
if mibBuilder.loadTexts: cevcEvcNotificationConfigGroup.setDescription('A collection of objects for configuring notification of this MIB.')
# Deprecated notification group (superseded by Rev1 below, which adds the
# MAC-security-violation notification).
cevcEvcNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 10)).setObjects(("CISCO-EVC-MIB", "cevcEvcStatusChangedNotification"), ("CISCO-EVC-MIB", "cevcEvcCreationNotification"), ("CISCO-EVC-MIB", "cevcEvcDeletionNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcEvcNotificationGroup = cevcEvcNotificationGroup.setStatus('deprecated')
if mibBuilder.loadTexts: cevcEvcNotificationGroup.setDescription('A collection of notifications that this MIB module is required to implement. cevcEvcNotificationGroup object is superseded by cevcEvcNotificationGroupRev1.')
cevcSIMatchCriteriaGroupRev1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 11)).setObjects(("CISCO-EVC-MIB", "cevcSIMatchRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchEncapStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapValid"), ("CISCO-EVC-MIB", "cevcSIMatchEncapEncapsulation"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPrimaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapSecondaryCos"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPayloadTypes"), ("CISCO-EVC-MIB", "cevcSIMatchEncapPriorityCos"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSISecondaryVlanEndingVlan"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cevcSIMatchCriteriaGroupRev1 = cevcSIMatchCriteriaGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cevcSIMatchCriteriaGroupRev1.setDescription('A collection of objects providing match criteria information for service instances. This group deprecates the old group cevcSIMatchCriteriaGroup.')
# Current notification group; its setStatus/setDescription calls follow
# past the end of this chunk.
cevcEvcNotificationGroupRev1 = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 12)).setObjects(("CISCO-EVC-MIB", "cevcEvcStatusChangedNotification"), ("CISCO-EVC-MIB", "cevcEvcCreationNotification"), ("CISCO-EVC-MIB", "cevcEvcDeletionNotification"), ("CISCO-EVC-MIB", "cevcMacSecurityViolationNotification"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcEvcNotificationGroupRev1 = cevcEvcNotificationGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cevcEvcNotificationGroupRev1.setDescription('A collection of notifications that this MIB module is required to implement. This module deprecates the cevcNotificationGroup')
cevcSIGroupRev1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 13)).setObjects(("CISCO-EVC-MIB", "cevcSIName"), ("CISCO-EVC-MIB", "cevcSITargetType"), ("CISCO-EVC-MIB", "cevcSITarget"), ("CISCO-EVC-MIB", "cevcSIEvcIndex"), ("CISCO-EVC-MIB", "cevcSIRowStatus"), ("CISCO-EVC-MIB", "cevcSIStorageType"), ("CISCO-EVC-MIB", "cevcSIAdminStatus"), ("CISCO-EVC-MIB", "cevcSIOperStatus"), ("CISCO-EVC-MIB", "cevcPortL2ControlProtocolAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteAction"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteEncapsulation"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan1"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteVlan2"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteSymmetric"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteRowStatus"), ("CISCO-EVC-MIB", "cevcSIVlanRewriteStorageType"), ("CISCO-EVC-MIB", "cevcSIForwardingType"), ("CISCO-EVC-MIB", "cevcSICEVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSICEVlanStorageType"), ("CISCO-EVC-MIB", "cevcSICEVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSIMatchStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchCriteriaType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapRowStatus"), ("CISCO-EVC-MIB", "cevcSIMatchEncapStorageType"), ("CISCO-EVC-MIB", "cevcSIMatchEncapValid"), ("CISCO-EVC-MIB", "cevcSIMatchEncapEncapsulation"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanRowStatus"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanStorageType"), ("CISCO-EVC-MIB", "cevcSIPrimaryVlanEndingVlan"), ("CISCO-EVC-MIB", "cevcSIMatchRowStatus"), ("CISCO-EVC-MIB", "cevcSICreationType"), ("CISCO-EVC-MIB", "cevcSIType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSIGroupRev1 = cevcSIGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cevcSIGroupRev1.setDescription('A collection of objects providing configuration and match criteria for service instances. This module deprecates the cevcSIGroup')
cevcSIForwardGroupRev1 = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 14)).setObjects(("CISCO-EVC-MIB", "cevcSIForwardingType"), ("CISCO-EVC-MIB", "cevcSIForwardBdRowStatus"), ("CISCO-EVC-MIB", "cevcSIForwardBdStorageType"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumberBase"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber1kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber2kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber3kBitmap"), ("CISCO-EVC-MIB", "cevcSIForwardBdNumber4kBitmap"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcSIForwardGroupRev1 = cevcSIForwardGroupRev1.setStatus('current')
if mibBuilder.loadTexts: cevcSIForwardGroupRev1.setDescription('A collection of objects providing service frame forwarding information for service instances. This module deprecates cevcSIForwardGroup')
cevcMacSecurityViolationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 613, 2, 2, 15)).setObjects(("CISCO-EVC-MIB", "cevcMacAddress"), ("CISCO-EVC-MIB", "cevcMaxMacConfigLimit"), ("CISCO-EVC-MIB", "cevcSIID"), ("CISCO-EVC-MIB", "cevcViolationCause"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cevcMacSecurityViolationGroup = cevcMacSecurityViolationGroup.setStatus('current')
if mibBuilder.loadTexts: cevcMacSecurityViolationGroup.setDescription('A collection of objects providing the maximum configured MAC limit, the MAC address, service instance ID and Violation cause for Mac Security Violation Information.')
mibBuilder.exportSymbols("CISCO-EVC-MIB", cevcEvcStorageType=cevcEvcStorageType, cevcSIType=cevcSIType, cevcEvc=cevcEvc, cevcEvcUniOperStatus=cevcEvcUniOperStatus, ciscoEvcMIBCompliance=ciscoEvcMIBCompliance, cevcEvcUniTable=cevcEvcUniTable, cevcSystemGroup=cevcSystemGroup, cevcNumCfgEvcs=cevcNumCfgEvcs, cevcSICEVlanRowStatus=cevcSICEVlanRowStatus, cevcSIName=cevcSIName, cevcSICreationType=cevcSICreationType, cevcSIForwardBdNumberBase=cevcSIForwardBdNumberBase, cevcSIMatchEncapPayloadTypes=cevcSIMatchEncapPayloadTypes, cevcPortTable=cevcPortTable, ServiceInstanceTarget=ServiceInstanceTarget, cevcSIMatchEncapEntry=cevcSIMatchEncapEntry, PYSNMP_MODULE_ID=ciscoEvcMIB, cevcEvcCfgUnis=cevcEvcCfgUnis, cevcSIForwardGroupRev1=cevcSIForwardGroupRev1, cevcSIForwardBdEntry=cevcSIForwardBdEntry, cevcSIEvcIndex=cevcSIEvcIndex, cevcSIForwardBdNumber3kBitmap=cevcSIForwardBdNumber3kBitmap, cevcSIMatchCriteriaIndex=cevcSIMatchCriteriaIndex, cevcSIGroup=cevcSIGroup, cevcSIForwardBdTable=cevcSIForwardBdTable, cevcEvcUniId=cevcEvcUniId, cevcEvcStatusChangedNotification=cevcEvcStatusChangedNotification, cevcViolationCause=cevcViolationCause, cevcSIPrimaryVlanEntry=cevcSIPrimaryVlanEntry, cevcEvcGroup=cevcEvcGroup, cevcSIIndex=cevcSIIndex, cevcSIVlanRewriteVlan1=cevcSIVlanRewriteVlan1, cevcSITable=cevcSITable, cevcSISecondaryVlanStorageType=cevcSISecondaryVlanStorageType, cevcUniPortType=cevcUniPortType, cevcEvcStateTable=cevcEvcStateTable, cevcSIVlanRewriteSymmetric=cevcSIVlanRewriteSymmetric, cevcSIMatchEncapRowStatus=cevcSIMatchEncapRowStatus, cevcMacSecurityViolationNotification=cevcMacSecurityViolationNotification, cevcEvcNotificationGroupRev1=cevcEvcNotificationGroupRev1, ciscoEvcMIB=ciscoEvcMIB, ciscoEvcMIBComplianceRev1=ciscoEvcMIBComplianceRev1, cevcUniTable=cevcUniTable, cevcUniCEVlanEvcTable=cevcUniCEVlanEvcTable, cevcEvcOperStatus=cevcEvcOperStatus, ciscoEvcMIBNotifications=ciscoEvcMIBNotifications, ciscoEvcMIBComplianceRev2=ciscoEvcMIBComplianceRev2, 
cevcSIVlanRewriteEntry=cevcSIVlanRewriteEntry, cevcUniCEVlanEvcBeginningVlan=cevcUniCEVlanEvcBeginningVlan, cevcSIPrimaryVlanEndingVlan=cevcSIPrimaryVlanEndingVlan, cevcSIMatchEncapPayloadType=cevcSIMatchEncapPayloadType, cevcSISecondaryVlanRowStatus=cevcSISecondaryVlanRowStatus, cevcEvcStateEntry=cevcEvcStateEntry, cevcPortGroup=cevcPortGroup, cevcSIPrimaryVlanStorageType=cevcSIPrimaryVlanStorageType, cevcSIMatchCriteriaType=cevcSIMatchCriteriaType, cevcSICEVlanTable=cevcSICEVlanTable, cevcSITarget=cevcSITarget, cevcSIAdminStatus=cevcSIAdminStatus, cevcSIL2ControlProtocolType=cevcSIL2ControlProtocolType, ciscoEvcNotificationPrefix=ciscoEvcNotificationPrefix, CiscoEvcIndexOrZero=CiscoEvcIndexOrZero, cevcEvcIdentifier=cevcEvcIdentifier, cevcSIStateEntry=cevcSIStateEntry, cevcSIVlanRewriteTable=cevcSIVlanRewriteTable, cevcSIMatchCriteriaEntry=cevcSIMatchCriteriaEntry, cevcEvcRowStatus=cevcEvcRowStatus, cevcEvcNotificationGroup=cevcEvcNotificationGroup, cevcSIForwardBdNumber2kBitmap=cevcSIForwardBdNumber2kBitmap, cevcMaxNumEvcs=cevcMaxNumEvcs, cevcSIL2ControlProtocolTable=cevcSIL2ControlProtocolTable, cevcEvcUniIndex=cevcEvcUniIndex, cevcEvcIndex=cevcEvcIndex, cevcServiceInstance=cevcServiceInstance, cevcUniCEVlanEvcEntry=cevcUniCEVlanEvcEntry, cevcSICEVlanEntry=cevcSICEVlanEntry, cevcSIVlanRewriteDirection=cevcSIVlanRewriteDirection, cevcSIID=cevcSIID, cevcSIMatchEncapEncapsulation=cevcSIMatchEncapEncapsulation, ciscoEvcMIBObjects=ciscoEvcMIBObjects, ServiceInstanceTargetType=ServiceInstanceTargetType, cevcPort=cevcPort, cevcSIVlanRewriteVlan2=cevcSIVlanRewriteVlan2, cevcSIForwardBdNumber1kBitmap=cevcSIForwardBdNumber1kBitmap, cevcMaxMacConfigLimit=cevcMaxMacConfigLimit, cevcSIMatchEncapSecondaryCos=cevcSIMatchEncapSecondaryCos, cevcPortMaxNumServiceInstances=cevcPortMaxNumServiceInstances, cevcEvcNotifyEnabled=cevcEvcNotifyEnabled, cevcEvcType=cevcEvcType, cevcMacSecurityViolation=cevcMacSecurityViolation, cevcEvcDeletionNotification=cevcEvcDeletionNotification, 
ciscoEvcMIBGroups=ciscoEvcMIBGroups, cevcSIL2ControlProtocolAction=cevcSIL2ControlProtocolAction, cevcSIVlanRewriteGroup=cevcSIVlanRewriteGroup, cevcUniServiceAttributes=cevcUniServiceAttributes, cevcSIMatchRowStatus=cevcSIMatchRowStatus, cevcSICEVlanStorageType=cevcSICEVlanStorageType, cevcSICEVlanBeginningVlan=cevcSICEVlanBeginningVlan, cevcSIMatchEncapStorageType=cevcSIMatchEncapStorageType, cevcSIL2ControlProtocolEntry=cevcSIL2ControlProtocolEntry, cevcSIMatchCriteriaTable=cevcSIMatchCriteriaTable, cevcEvcActiveUnis=cevcEvcActiveUnis, cevcSIVlanRewriteAction=cevcSIVlanRewriteAction, ciscoEvcMIBCompliances=ciscoEvcMIBCompliances, cevcSICEVlanEndingVlan=cevcSICEVlanEndingVlan, cevcSIPrimaryVlanTable=cevcSIPrimaryVlanTable, cevcSIVlanRewriteEncapsulation=cevcSIVlanRewriteEncapsulation, cevcSIForwardingType=cevcSIForwardingType, cevcSISecondaryVlanBeginningVlan=cevcSISecondaryVlanBeginningVlan, cevcSystem=cevcSystem, ciscoEvcMIBConformance=ciscoEvcMIBConformance, cevcMacSecurityViolationGroup=cevcMacSecurityViolationGroup, cevcSIMatchEncapPriorityCos=cevcSIMatchEncapPriorityCos, cevcSIOperStatus=cevcSIOperStatus, CiscoEvcIndex=CiscoEvcIndex, cevcSIMatchCriteriaGroup=cevcSIMatchCriteriaGroup, cevcSITargetType=cevcSITargetType, cevcPortL2ControlProtocolTable=cevcPortL2ControlProtocolTable, cevcUniIdentifier=cevcUniIdentifier, cevcSISecondaryVlanTable=cevcSISecondaryVlanTable, cevcSIStorageType=cevcSIStorageType, CevcL2ControlProtocolType=CevcL2ControlProtocolType, cevcSIMatchCriteriaGroupRev1=cevcSIMatchCriteriaGroupRev1, cevcSICosMatchCriteriaGroup=cevcSICosMatchCriteriaGroup, cevcSIForwardGroup=cevcSIForwardGroup, cevcEvcUniEntry=cevcEvcUniEntry, cevcEvcNotificationConfigGroup=cevcEvcNotificationConfigGroup, cevcPortL2ControlProtocolAction=cevcPortL2ControlProtocolAction, CevcMacSecurityViolationCauseType=CevcMacSecurityViolationCauseType, cevcSIRowStatus=cevcSIRowStatus, cevcEvcEntry=cevcEvcEntry, cevcEvcCreationNotification=cevcEvcCreationNotification, 
cevcEvcLocalUniIfIndex=cevcEvcLocalUniIfIndex, cevcUniEntry=cevcUniEntry, cevcSIVlanRewriteRowStatus=cevcSIVlanRewriteRowStatus, cevcPortMaxNumEVCs=cevcPortMaxNumEVCs, cevcPortL2ControlProtocolType=cevcPortL2ControlProtocolType, cevcSISecondaryVlanEntry=cevcSISecondaryVlanEntry, cevcUniCEVlanEvcEndingVlan=cevcUniCEVlanEvcEndingVlan, cevcSIForwardBdRowStatus=cevcSIForwardBdRowStatus, cevcPortMode=cevcPortMode, cevcMacAddress=cevcMacAddress, cevcSIMatchEncapValid=cevcSIMatchEncapValid, cevcUniEvcIndex=cevcUniEvcIndex, cevcPortL2ControlProtocolEntry=cevcPortL2ControlProtocolEntry, cevcSIVlanRewriteStorageType=cevcSIVlanRewriteStorageType, cevcSIStateTable=cevcSIStateTable, cevcSIPrimaryVlanRowStatus=cevcSIPrimaryVlanRowStatus, cevcSIMatchEncapTable=cevcSIMatchEncapTable, cevcSISecondaryVlanEndingVlan=cevcSISecondaryVlanEndingVlan, cevcSIForwardBdNumber4kBitmap=cevcSIForwardBdNumber4kBitmap, cevcPortEntry=cevcPortEntry, cevcSIPrimaryVlanBeginningVlan=cevcSIPrimaryVlanBeginningVlan, ServiceInstanceInterface=ServiceInstanceInterface, cevcSIForwardBdNumber=cevcSIForwardBdNumber, cevcSIMatchEncapPrimaryCos=cevcSIMatchEncapPrimaryCos, cevcEvcTable=cevcEvcTable, cevcSIForwardBdStorageType=cevcSIForwardBdStorageType, cevcSIGroupRev1=cevcSIGroupRev1, cevcEvcNotificationConfig=cevcEvcNotificationConfig, cevcSIEntry=cevcSIEntry, cevcSIMatchStorageType=cevcSIMatchStorageType)
| pysnmp-with-texts/CISCO-EVC-MIB.py | 122,691 | PySNMP MIB module CISCO-EVC-MIB (http://snmplabs.com/pysmi) ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-EVC-MIB Produced by pysmi-0.3.4 at Wed May 1 11:57:43 2019 On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) | 320 | en | 0.370429 |
# -*- coding: utf-8 -*-
from django.db import models
from django.shortcuts import render,redirect
from django.views import View
from django.contrib.auth import authenticate, login , logout as django_logout
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.http import Http404, HttpResponseRedirect
from django.core.paginator import Paginator
import django_filters
from django import forms
from .models import B2BRecord
# Create your views here.
class LendRecordFilter(django_filters.FilterSet):
    """Query-string filter for ``B2BRecord`` lend records.

    Exposes two filters:
      * ``depot``   -- auto-generated exact-match filter from the model field.
      * ``created`` -- inclusive from/to date range (read from the
        ``created_after`` / ``created_before`` query parameters).
    """
    # NOTE(review): DateFromToRangeFilter conventionally pairs with
    # django_filters' own RangeWidget; SplitDateTimeWidget renders a
    # date + time sub-input pair, and the attrs below force both to
    # ``type="date"`` so they act as the from/to inputs.  Presumably
    # intentional for the datepicker styling -- confirm against the template.
    created = django_filters.DateFromToRangeFilter(
        widget=forms.SplitDateTimeWidget(
            attrs={
                'class':'datepicker',
                'type':'date',
            }
        )
    )
    class Meta:
        # Filterable model and field whitelist.
        model = B2BRecord
        fields = ['depot','created']
class LendRecordListView(View):
    """Paginated, filterable listing of B2B lend records."""

    def get(self, request):
        """Render the lend-record list filtered by query params, 15 per page."""
        record_filter = LendRecordFilter(request.GET)
        paginator = Paginator(record_filter.qs, 15)
        current_page = paginator.get_page(request.GET.get('page'))
        context = {'records': current_page, 'filter': record_filter}
        return render(request, 'lend_record_list.html', context)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a self-generating script that contains all of the iso3166-1 data.
To regenerate, a CSV file must be created that contains the latest data. Here's
how to do that:
1. Visit https://www.iso.org/obp
2. Click the "Country Codes" radio option and click the search button
3. Filter by "Officially assigned codes"
4. Change the results per page to 300
5. Copy the html table and paste into Libreoffice Calc / Excel
6. Delete the French name column
7. Save as a CSV file in django_countries/iso3166-1.csv
8. Run this script from the command line
"""
from __future__ import unicode_literals
import glob
import os
try:
    # Django < 4.0 spelling of the lazy translation helper.
    from django.utils.translation import ugettext_lazy as _
except ImportError:  # pragma: no cover
    try:
        # ``ugettext_lazy`` was removed in Django 4.0; fall back to the
        # modern name so country names stay translatable.
        from django.utils.translation import gettext_lazy as _
    except ImportError:
        # Allows this module to be executed without Django installed.
        def _(text):
            """Identity fallback used when Django is unavailable."""
            return text
# Short, everyday country names keyed by ISO 3166-1 alpha-2 code.
# These override the official (often lengthy) names in COUNTRIES when a
# caller asks for common names; every key here must also exist in COUNTRIES
# (see check_common_names below).
COMMON_NAMES = {
    "BN": _("Brunei"),
    "BO": _("Bolivia"),
    "GB": _("United Kingdom"),
    "IR": _("Iran"),
    "KP": _("North Korea"),
    "KR": _("South Korea"),
    "LA": _("Laos"),
    "MD": _("Moldova"),
    "MK": _("Macedonia"),
    "RU": _("Russia"),
    "SY": _("Syria"),
    "TW": _("Taiwan"),
    "TZ": _("Tanzania"),
    "VE": _("Venezuela"),
    "VN": _("Vietnam"),
}
# Nicely titled (and translatable) country names.
COUNTRIES = {
"AF": _("Afghanistan"),
"AX": _("Åland Islands"),
"AL": _("Albania"),
"DZ": _("Algeria"),
"AS": _("American Samoa"),
"AD": _("Andorra"),
"AO": _("Angola"),
"AI": _("Anguilla"),
"AQ": _("Antarctica"),
"AG": _("Antigua and Barbuda"),
"AR": _("Argentina"),
"AM": _("Armenia"),
"AW": _("Aruba"),
"AU": _("Australia"),
"AT": _("Austria"),
"AZ": _("Azerbaijan"),
"BS": _("Bahamas"),
"BH": _("Bahrain"),
"BD": _("Bangladesh"),
"BB": _("Barbados"),
"BY": _("Belarus"),
"BE": _("Belgium"),
"BZ": _("Belize"),
"BJ": _("Benin"),
"BM": _("Bermuda"),
"BT": _("Bhutan"),
"BO": _("Bolivia (Plurinational State of)"),
"BQ": _("Bonaire, Sint Eustatius and Saba"),
"BA": _("Bosnia and Herzegovina"),
"BW": _("Botswana"),
"BV": _("Bouvet Island"),
"BR": _("Brazil"),
"IO": _("British Indian Ocean Territory"),
"BN": _("Brunei Darussalam"),
"BG": _("Bulgaria"),
"BF": _("Burkina Faso"),
"BI": _("Burundi"),
"CV": _("Cabo Verde"),
"KH": _("Cambodia"),
"CM": _("Cameroon"),
"CA": _("Canada"),
"KY": _("Cayman Islands"),
"CF": _("Central African Republic"),
"TD": _("Chad"),
"CL": _("Chile"),
"CN": _("China"),
"CX": _("Christmas Island"),
"CC": _("Cocos (Keeling) Islands"),
"CO": _("Colombia"),
"KM": _("Comoros"),
"CD": _("Congo (the Democratic Republic of the)"),
"CG": _("Congo"),
"CK": _("Cook Islands"),
"CR": _("Costa Rica"),
"CI": _("Côte d'Ivoire"),
"HR": _("Croatia"),
"CU": _("Cuba"),
"CW": _("Curaçao"),
"CY": _("Cyprus"),
"CZ": _("Czechia"),
"DK": _("Denmark"),
"DJ": _("Djibouti"),
"DM": _("Dominica"),
"DO": _("Dominican Republic"),
"EC": _("Ecuador"),
"EG": _("Egypt"),
"SV": _("El Salvador"),
"GQ": _("Equatorial Guinea"),
"ER": _("Eritrea"),
"EE": _("Estonia"),
"ET": _("Ethiopia"),
"FK": _("Falkland Islands [Malvinas]"),
"FO": _("Faroe Islands"),
"FJ": _("Fiji"),
"FI": _("Finland"),
"FR": _("France"),
"GF": _("French Guiana"),
"PF": _("French Polynesia"),
"TF": _("French Southern Territories"),
"GA": _("Gabon"),
"GM": _("Gambia"),
"GE": _("Georgia"),
"DE": _("Germany"),
"GH": _("Ghana"),
"GI": _("Gibraltar"),
"GR": _("Greece"),
"GL": _("Greenland"),
"GD": _("Grenada"),
"GP": _("Guadeloupe"),
"GU": _("Guam"),
"GT": _("Guatemala"),
"GG": _("Guernsey"),
"GN": _("Guinea"),
"GW": _("Guinea-Bissau"),
"GY": _("Guyana"),
"HT": _("Haiti"),
"HM": _("Heard Island and McDonald Islands"),
"VA": _("Holy See"),
"HN": _("Honduras"),
"HK": _("Hong Kong"),
"HU": _("Hungary"),
"IS": _("Iceland"),
"IN": _("India"),
"ID": _("Indonesia"),
"IR": _("Iran (Islamic Republic of)"),
"IQ": _("Iraq"),
"IE": _("Ireland"),
"IM": _("Isle of Man"),
"IL": _("Israel"),
"IT": _("Italy"),
"JM": _("Jamaica"),
"JP": _("Japan"),
"JE": _("Jersey"),
"JO": _("Jordan"),
"KZ": _("Kazakhstan"),
"KE": _("Kenya"),
"KI": _("Kiribati"),
"KP": _("Korea (the Democratic People's Republic of)"),
"KR": _("Korea (the Republic of)"),
"KW": _("Kuwait"),
"KG": _("Kyrgyzstan"),
"LA": _("Lao People's Democratic Republic"),
"LV": _("Latvia"),
"LB": _("Lebanon"),
"LS": _("Lesotho"),
"LR": _("Liberia"),
"LY": _("Libya"),
"LI": _("Liechtenstein"),
"LT": _("Lithuania"),
"LU": _("Luxembourg"),
"MO": _("Macao"),
"MK": _("Macedonia (the former Yugoslav Republic of)"),
"MG": _("Madagascar"),
"MW": _("Malawi"),
"MY": _("Malaysia"),
"MV": _("Maldives"),
"ML": _("Mali"),
"MT": _("Malta"),
"MH": _("Marshall Islands"),
"MQ": _("Martinique"),
"MR": _("Mauritania"),
"MU": _("Mauritius"),
"YT": _("Mayotte"),
"MX": _("Mexico"),
"FM": _("Micronesia (Federated States of)"),
"MD": _("Moldova (the Republic of)"),
"MC": _("Monaco"),
"MN": _("Mongolia"),
"ME": _("Montenegro"),
"MS": _("Montserrat"),
"MA": _("Morocco"),
"MZ": _("Mozambique"),
"MM": _("Myanmar"),
"NA": _("Namibia"),
"NR": _("Nauru"),
"NP": _("Nepal"),
"NL": _("Netherlands"),
"NC": _("New Caledonia"),
"NZ": _("New Zealand"),
"NI": _("Nicaragua"),
"NE": _("Niger"),
"NG": _("Nigeria"),
"NU": _("Niue"),
"NF": _("Norfolk Island"),
"MP": _("Northern Mariana Islands"),
"NO": _("Norway"),
"OM": _("Oman"),
"PK": _("Pakistan"),
"PW": _("Palau"),
"PS": _("Palestine, State of"),
"PA": _("Panama"),
"PG": _("Papua New Guinea"),
"PY": _("Paraguay"),
"PE": _("Peru"),
"PH": _("Philippines"),
"PN": _("Pitcairn"),
"PL": _("Poland"),
"PT": _("Portugal"),
"PR": _("Puerto Rico"),
"QA": _("Qatar"),
"RE": _("Réunion"),
"RO": _("Romania"),
"RU": _("Russian Federation"),
"RW": _("Rwanda"),
"BL": _("Saint Barthélemy"),
"SH": _("Saint Helena, Ascension and Tristan da Cunha"),
"KN": _("Saint Kitts and Nevis"),
"LC": _("Saint Lucia"),
"MF": _("Saint Martin (French part)"),
"PM": _("Saint Pierre and Miquelon"),
"VC": _("Saint Vincent and the Grenadines"),
"WS": _("Samoa"),
"SM": _("San Marino"),
"ST": _("Sao Tome and Principe"),
"SA": _("Saudi Arabia"),
"SN": _("Senegal"),
"RS": _("Serbia"),
"SC": _("Seychelles"),
"SL": _("Sierra Leone"),
"SG": _("Singapore"),
"SX": _("Sint Maarten (Dutch part)"),
"SK": _("Slovakia"),
"SI": _("Slovenia"),
"SB": _("Solomon Islands"),
"SO": _("Somalia"),
"ZA": _("South Africa"),
"GS": _("South Georgia and the South Sandwich Islands"),
"SS": _("South Sudan"),
"ES": _("Spain"),
"LK": _("Sri Lanka"),
"SD": _("Sudan"),
"SR": _("Suriname"),
"SJ": _("Svalbard and Jan Mayen"),
"SZ": _("Swaziland"),
"SE": _("Sweden"),
"CH": _("Switzerland"),
"SY": _("Syrian Arab Republic"),
"TW": _("Taiwan (Province of China)"),
"TJ": _("Tajikistan"),
"TZ": _("Tanzania, United Republic of"),
"TH": _("Thailand"),
"TL": _("Timor-Leste"),
"TG": _("Togo"),
"TK": _("Tokelau"),
"TO": _("Tonga"),
"TT": _("Trinidad and Tobago"),
"TN": _("Tunisia"),
"TR": _("Turkey"),
"TM": _("Turkmenistan"),
"TC": _("Turks and Caicos Islands"),
"TV": _("Tuvalu"),
"UG": _("Uganda"),
"UA": _("Ukraine"),
"AE": _("United Arab Emirates"),
"GB": _("United Kingdom of Great Britain and Northern Ireland"),
"UM": _("United States Minor Outlying Islands"),
"US": _("United States of America"),
"UY": _("Uruguay"),
"UZ": _("Uzbekistan"),
"VU": _("Vanuatu"),
"VE": _("Venezuela (Bolivarian Republic of)"),
"VN": _("Viet Nam"),
"VG": _("Virgin Islands (British)"),
"VI": _("Virgin Islands (U.S.)"),
"WF": _("Wallis and Futuna"),
"EH": _("Western Sahara"),
"YE": _("Yemen"),
"ZM": _("Zambia"),
"ZW": _("Zimbabwe"),
}
ALT_CODES = {
"AF": ("AFG", 4),
"AX": ("ALA", 248),
"AL": ("ALB", 8),
"DZ": ("DZA", 12),
"AS": ("ASM", 16),
"AD": ("AND", 20),
"AO": ("AGO", 24),
"AI": ("AIA", 660),
"AQ": ("ATA", 10),
"AG": ("ATG", 28),
"AR": ("ARG", 32),
"AM": ("ARM", 51),
"AW": ("ABW", 533),
"AU": ("AUS", 36),
"AT": ("AUT", 40),
"AZ": ("AZE", 31),
"BS": ("BHS", 44),
"BH": ("BHR", 48),
"BD": ("BGD", 50),
"BB": ("BRB", 52),
"BY": ("BLR", 112),
"BE": ("BEL", 56),
"BZ": ("BLZ", 84),
"BJ": ("BEN", 204),
"BM": ("BMU", 60),
"BT": ("BTN", 64),
"BO": ("BOL", 68),
"BQ": ("BES", 535),
"BA": ("BIH", 70),
"BW": ("BWA", 72),
"BV": ("BVT", 74),
"BR": ("BRA", 76),
"IO": ("IOT", 86),
"BN": ("BRN", 96),
"BG": ("BGR", 100),
"BF": ("BFA", 854),
"BI": ("BDI", 108),
"CV": ("CPV", 132),
"KH": ("KHM", 116),
"CM": ("CMR", 120),
"CA": ("CAN", 124),
"KY": ("CYM", 136),
"CF": ("CAF", 140),
"TD": ("TCD", 148),
"CL": ("CHL", 152),
"CN": ("CHN", 156),
"CX": ("CXR", 162),
"CC": ("CCK", 166),
"CO": ("COL", 170),
"KM": ("COM", 174),
"CD": ("COD", 180),
"CG": ("COG", 178),
"CK": ("COK", 184),
"CR": ("CRI", 188),
"CI": ("CIV", 384),
"HR": ("HRV", 191),
"CU": ("CUB", 192),
"CW": ("CUW", 531),
"CY": ("CYP", 196),
"CZ": ("CZE", 203),
"DK": ("DNK", 208),
"DJ": ("DJI", 262),
"DM": ("DMA", 212),
"DO": ("DOM", 214),
"EC": ("ECU", 218),
"EG": ("EGY", 818),
"SV": ("SLV", 222),
"GQ": ("GNQ", 226),
"ER": ("ERI", 232),
"EE": ("EST", 233),
"ET": ("ETH", 231),
"FK": ("FLK", 238),
"FO": ("FRO", 234),
"FJ": ("FJI", 242),
"FI": ("FIN", 246),
"FR": ("FRA", 250),
"GF": ("GUF", 254),
"PF": ("PYF", 258),
"TF": ("ATF", 260),
"GA": ("GAB", 266),
"GM": ("GMB", 270),
"GE": ("GEO", 268),
"DE": ("DEU", 276),
"GH": ("GHA", 288),
"GI": ("GIB", 292),
"GR": ("GRC", 300),
"GL": ("GRL", 304),
"GD": ("GRD", 308),
"GP": ("GLP", 312),
"GU": ("GUM", 316),
"GT": ("GTM", 320),
"GG": ("GGY", 831),
"GN": ("GIN", 324),
"GW": ("GNB", 624),
"GY": ("GUY", 328),
"HT": ("HTI", 332),
"HM": ("HMD", 334),
"VA": ("VAT", 336),
"HN": ("HND", 340),
"HK": ("HKG", 344),
"HU": ("HUN", 348),
"IS": ("ISL", 352),
"IN": ("IND", 356),
"ID": ("IDN", 360),
"IR": ("IRN", 364),
"IQ": ("IRQ", 368),
"IE": ("IRL", 372),
"IM": ("IMN", 833),
"IL": ("ISR", 376),
"IT": ("ITA", 380),
"JM": ("JAM", 388),
"JP": ("JPN", 392),
"JE": ("JEY", 832),
"JO": ("JOR", 400),
"KZ": ("KAZ", 398),
"KE": ("KEN", 404),
"KI": ("KIR", 296),
"KP": ("PRK", 408),
"KR": ("KOR", 410),
"KW": ("KWT", 414),
"KG": ("KGZ", 417),
"LA": ("LAO", 418),
"LV": ("LVA", 428),
"LB": ("LBN", 422),
"LS": ("LSO", 426),
"LR": ("LBR", 430),
"LY": ("LBY", 434),
"LI": ("LIE", 438),
"LT": ("LTU", 440),
"LU": ("LUX", 442),
"MO": ("MAC", 446),
"MK": ("MKD", 807),
"MG": ("MDG", 450),
"MW": ("MWI", 454),
"MY": ("MYS", 458),
"MV": ("MDV", 462),
"ML": ("MLI", 466),
"MT": ("MLT", 470),
"MH": ("MHL", 584),
"MQ": ("MTQ", 474),
"MR": ("MRT", 478),
"MU": ("MUS", 480),
"YT": ("MYT", 175),
"MX": ("MEX", 484),
"FM": ("FSM", 583),
"MD": ("MDA", 498),
"MC": ("MCO", 492),
"MN": ("MNG", 496),
"ME": ("MNE", 499),
"MS": ("MSR", 500),
"MA": ("MAR", 504),
"MZ": ("MOZ", 508),
"MM": ("MMR", 104),
"NA": ("NAM", 516),
"NR": ("NRU", 520),
"NP": ("NPL", 524),
"NL": ("NLD", 528),
"NC": ("NCL", 540),
"NZ": ("NZL", 554),
"NI": ("NIC", 558),
"NE": ("NER", 562),
"NG": ("NGA", 566),
"NU": ("NIU", 570),
"NF": ("NFK", 574),
"MP": ("MNP", 580),
"NO": ("NOR", 578),
"OM": ("OMN", 512),
"PK": ("PAK", 586),
"PW": ("PLW", 585),
"PS": ("PSE", 275),
"PA": ("PAN", 591),
"PG": ("PNG", 598),
"PY": ("PRY", 600),
"PE": ("PER", 604),
"PH": ("PHL", 608),
"PN": ("PCN", 612),
"PL": ("POL", 616),
"PT": ("PRT", 620),
"PR": ("PRI", 630),
"QA": ("QAT", 634),
"RE": ("REU", 638),
"RO": ("ROU", 642),
"RU": ("RUS", 643),
"RW": ("RWA", 646),
"BL": ("BLM", 652),
"SH": ("SHN", 654),
"KN": ("KNA", 659),
"LC": ("LCA", 662),
"MF": ("MAF", 663),
"PM": ("SPM", 666),
"VC": ("VCT", 670),
"WS": ("WSM", 882),
"SM": ("SMR", 674),
"ST": ("STP", 678),
"SA": ("SAU", 682),
"SN": ("SEN", 686),
"RS": ("SRB", 688),
"SC": ("SYC", 690),
"SL": ("SLE", 694),
"SG": ("SGP", 702),
"SX": ("SXM", 534),
"SK": ("SVK", 703),
"SI": ("SVN", 705),
"SB": ("SLB", 90),
"SO": ("SOM", 706),
"ZA": ("ZAF", 710),
"GS": ("SGS", 239),
"SS": ("SSD", 728),
"ES": ("ESP", 724),
"LK": ("LKA", 144),
"SD": ("SDN", 729),
"SR": ("SUR", 740),
"SJ": ("SJM", 744),
"SZ": ("SWZ", 748),
"SE": ("SWE", 752),
"CH": ("CHE", 756),
"SY": ("SYR", 760),
"TW": ("TWN", 158),
"TJ": ("TJK", 762),
"TZ": ("TZA", 834),
"TH": ("THA", 764),
"TL": ("TLS", 626),
"TG": ("TGO", 768),
"TK": ("TKL", 772),
"TO": ("TON", 776),
"TT": ("TTO", 780),
"TN": ("TUN", 788),
"TR": ("TUR", 792),
"TM": ("TKM", 795),
"TC": ("TCA", 796),
"TV": ("TUV", 798),
"UG": ("UGA", 800),
"UA": ("UKR", 804),
"AE": ("ARE", 784),
"GB": ("GBR", 826),
"UM": ("UMI", 581),
"US": ("USA", 840),
"UY": ("URY", 858),
"UZ": ("UZB", 860),
"VU": ("VUT", 548),
"VE": ("VEN", 862),
"VN": ("VNM", 704),
"VG": ("VGB", 92),
"VI": ("VIR", 850),
"WF": ("WLF", 876),
"EH": ("ESH", 732),
"YE": ("YEM", 887),
"ZM": ("ZMB", 894),
"ZW": ("ZWE", 716),
}
def self_generate(
        output_filename, filename='iso3166-1.csv'):  # pragma: no cover
    """Regenerate the COUNTRIES and ALT_CODES dicts in this module.

    Reads a UTF-8 CSV file whose first four columns are the short ISO
    name, the two-letter code, the three-letter code and the numeric
    code, splices the regenerated dict bodies into this source file's
    text and writes the result to ``output_filename``.

    Returns the list of ``(name, code)`` tuples that were written.

    .. note:: Rewritten for Python 3: the original opened the CSV in
       binary mode and ``.decode()``-d each cell (Python 2 style), and
       concatenated ``bytes`` from ``.encode()`` onto ``str`` content,
       which raises ``TypeError`` on Python 3.
    """
    import csv
    import re
    countries = []
    alt_codes = []
    # csv.reader on Python 3 wants a text-mode file; newline='' per the
    # csv module docs.
    with open(filename, encoding='utf-8', newline='') as csv_file:
        for row in csv.reader(csv_file):
            # Trailing '*' marks provisional entries in the ISO table.
            name = re.sub(r'\(the\)', '', row[0].rstrip('*'))
            if name:
                countries.append((name, row[1]))
                alt_codes.append((row[1], row[2], int(row[3])))
    with open(__file__, 'r', encoding='utf-8') as source_file:
        contents = source_file.read()
    # Write countries: split this file's text around the COUNTRIES body.
    bits = re.match(
        r'(.*\nCOUNTRIES = \{\n)(.*?)(\n\}.*)', contents, re.DOTALL).groups()
    country_list = []
    for name, code in countries:
        name = name.replace('"', r'\"').strip()
        country_list.append(
            '    "{code}": _("{name}"),'.format(name=name, code=code))
    content = bits[0]
    content += '\n'.join(country_list)
    # Write alt codes: same splice applied to the ALT_CODES body.
    # (The original re-ran a stale ``name.replace`` here -- dead leftover
    # from the countries loop; removed.)
    alt_bits = re.match(
        r'(.*\nALT_CODES = \{\n)(.*)(\n\}.*)', bits[2], re.DOTALL).groups()
    alt_list = []
    for code, code3, codenum in alt_codes:
        alt_list.append(
            '    "{code}": ("{code3}", {codenum}),'.format(
                code=code, code3=code3, codenum=codenum))
    content += alt_bits[0]
    content += '\n'.join(alt_list)
    content += alt_bits[2]
    # Generate file.
    with open(output_filename, 'w', encoding='utf-8') as output_file:
        output_file.write(content)
    return countries
def check_flags(verbosity=1):
    """Report mismatches between COUNTRIES and the bundled flag GIFs.

    Prints country codes lacking a flag image and flag files lacking a
    matching country code.  ``verbosity`` only gates the all-clear message.
    """
    flag_dir = os.path.join(os.path.dirname(__file__), 'static', 'flags')
    files = {
        os.path.basename(os.path.splitext(path)[0]).upper(): path
        for path in glob.glob(os.path.join(flag_dir, '*.gif'))
    }
    flags_missing = set(COUNTRIES) - set(files)
    if flags_missing:  # pragma: no cover
        print("The following country codes are missing a flag:")
        for code in sorted(flags_missing):
            print(" {0} ({1})".format(code, COUNTRIES[code]))
    elif verbosity:  # pragma: no cover
        print("All country codes have flags. :)")
    # EU and __ are shipped as flags on purpose despite not being ISO codes.
    code_missing = set(files) - set(COUNTRIES) - {'EU', '__'}
    if code_missing:  # pragma: no cover
        print("")
        print("The following flags don't have a matching country code:")
        for path in sorted(code_missing):
            print(" {0}".format(path))
def check_common_names():
    """Print any COMMON_NAMES codes that have no official COUNTRIES entry."""
    unmatched = set(COMMON_NAMES) - set(COUNTRIES)
    if unmatched:  # pragma: no cover
        print("")
        print(
            "The following common names do not match an official country "
            "code:")
        for code in sorted(unmatched):
            print(" {0}".format(code))
if __name__ == '__main__':  # pragma: no cover
    # Regenerate this module in place from iso3166-1.csv (see the module
    # docstring for how to produce the CSV), then run the sanity checks.
    countries = self_generate(__file__)
    print('Wrote {0} countries.'.format(len(countries)))
    print("")
    check_flags()
    check_common_names()
| django_countries/data.py | 17,818 | The following code can be used for self-generation of this file.
It requires a UTF-8 CSV file containing the short ISO name and two letter
country code as the first two columns.
This is a self-generating script that contains all of the iso3166-1 data.
To regenerate, a CSV file must be created that contains the latest data. Here's
how to do that:
1. Visit https://www.iso.org/obp
2. Click the "Country Codes" radio option and click the search button
3. Filter by "Officially assigned codes"
4. Change the results per page to 300
5. Copy the html table and paste into Libreoffice Calc / Excel
6. Delete the French name column
7. Save as a CSV file in django_countries/iso3166-1.csv
8. Run this script from the command line
!/usr/bin/env python -*- coding: utf-8 -*- pragma: no cover Allows this module to be executed without Django installed. Nicely titled (and translatable) country names. pragma: no cover Write countries. Write alt codes. Generate file. pragma: no cover pragma: no cover Special-case EU and __ pragma: no cover pragma: no cover pragma: no cover | 1,068 | en | 0.809766 |
import unittest
from tori.decorator.common import *
class TestDecoratorCommonSingletonClass(unittest.TestCase):
    """ Test the 'singleton' and 'singleton_with' decorators. """

    class DummyTest(object):
        """ Base fixture: a counter whose value reveals whether every call to
        ``instance()`` returns the same underlying object. """

        def __init__(self):
            self.number = 0

        def take_action(self):
            """ Increment the shared counter. """
            self.number += 1

        def get_number(self):
            """ Return the current counter value. """
            return self.number

    def test_positive_without_instance_attr(self):
        """ Test if the target class without a singleton attribute. """
        try:
            @singleton
            class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest): pass
        except SingletonInitializationException:
            self.fail('Singleton Class: Failed the initialization with known exception.')
        # Test for the type.
        self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
        # Test if it is working. (case #1)
        SuperDummyClass.instance().take_action()
        self.assertEqual(SuperDummyClass.instance().get_number(), 1)
        # Test if it is working. (case #n)
        SuperDummyClass.instance().take_action()
        self.assertEqual(SuperDummyClass.instance().get_number(), 2)

    def test_positive_using_decorator_with_primitive_parameters(self):
        """ Test if the target class without a singleton attribute but using a decorator with primitive parameters. """
        try:
            @singleton(10)
            class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
                def __init__(self, init_number):
                    # Use `self.__class__` to call the parent class' constructor.
                    super(self.__class__, self).__init__()
                    self.number = init_number
        except SingletonInitializationException:
            self.fail('Singleton Class: Failed the initialization with known exception.')
        # Test for the type.
        self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
        # The counter starts from the injected primitive (10), not zero. (case #1)
        SuperDummyClass.instance().take_action()
        self.assertEqual(SuperDummyClass.instance().get_number(), 11)
        # Test if it is working. (case #n)
        SuperDummyClass.instance().take_action()
        self.assertEqual(SuperDummyClass.instance().get_number(), 12)

    def test_positive_for_normal_singleton_with_parameters(self):
        """ Positive test for @singleton with parameters provided for the constructor """
        try:
            class SampleDependencyInjection(object): pass
            sample_di = SampleDependencyInjection()
            @singleton(sample_di)
            class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
                def __init__(self, dependency_injection):
                    # Use `self.__class__` to call the parent class' constructor.
                    super(self.__class__, self).__init__()
                    self.dependency_injection = dependency_injection
        except SingletonInitializationException:
            self.fail('Singleton Class: Failed the initialization with known exception.')
        # Test for the type.
        self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
        # Test if it is working. (case #1)
        SuperDummyClass.instance().take_action()
        self.assertEqual(SuperDummyClass.instance().get_number(), 1)
        # Test if it is working. (case #n)
        SuperDummyClass.instance().take_action()
        self.assertEqual(SuperDummyClass.instance().get_number(), 2)
        # Test if the dependency injection is working.
        self.assertIsInstance(SuperDummyClass.instance().dependency_injection, SampleDependencyInjection)

    def test_negative_for_normal_singleton_with_class_reference(self):
        """ Negative test for @singleton with class_reference provided for the constructor """
        # Note that this test case shows the limitation of the decorator which
        # can't take a class reference as a parameter. Strongly recommend to
        # use @singleton_with as it is more powerful.
        try:
            class SampleDependencyInjection(object): pass
            @singleton(SampleDependencyInjection)
            class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
                def __init__(self, dependency_injection):
                    # Use `self.__class__` to call the parent class' constructor.
                    super(self.__class__, self).__init__()
                    self.dependency_injection = dependency_injection
            # Reaching this point means @singleton accepted a bare class
            # reference, which it must not do.
            self.fail('Singleton Class: Passed the initialization unexpectedly.')
        except SingletonInitializationException:
            self.fail('Singleton Class: Failed the initialization with known-yet-unexpected exception.')
        except TypeError:
            # Expected failure mode when a class reference is supplied.
            pass

    def test_positive_for_singleton_with(self):
        """ Positive test for @singleton_with(*args, **kwargs) """
        try:
            class SampleDependencyInjection(object): pass
            @singleton_with(SampleDependencyInjection)
            class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
                def __init__(self, dependency_injection):
                    # Use `self.__class__` to call the parent class' constructor.
                    super(self.__class__, self).__init__()
                    # The constructor receives the class and instantiates it here.
                    self.dependency_injection = dependency_injection()
        except SingletonInitializationException:
            self.fail('Singleton Class: Failed the initialization with known exception.')
        # Test for the type.
        self.assertIsInstance(SuperDummyClass.instance(), SuperDummyClass)
        # Test if it is working. (case #1)
        SuperDummyClass.instance().take_action()
        self.assertEqual(SuperDummyClass.instance().get_number(), 1)
        # Test if it is working. (case #n)
        SuperDummyClass.instance().take_action()
        self.assertEqual(SuperDummyClass.instance().get_number(), 2)
        # Test if the dependency injection is working.
        self.assertIsInstance(SuperDummyClass.instance().dependency_injection, SampleDependencyInjection)

    def test_negative_with_existed_singleton_instance(self):
        """ Test if the target class is with null singleton attribute. """
        try:
            @singleton
            class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
                _singleton_instance = None
                def __init__(self):
                    # Use `self.__class__` to call the parent class' constructor.
                    super(self.__class__, self).__init__()
            self.fail('Singleton Class: Passed the initialization unexpectedly.')
        except SingletonInitializationException:
            # Expected: a pre-declared `_singleton_instance` attribute is rejected.
            pass

    def test_negative_with_unexpected_instance_attr(self):
        """ Test if the target class has already had an attribute `_singleton_instance` but it is not compatible. """
        try:
            @singleton
            class SuperDummyClass(TestDecoratorCommonSingletonClass.DummyTest):
                _singleton_instance = {}
                def __init__(self):
                    # Use `self.__class__` to call the parent class' constructor.
                    super(self.__class__, self).__init__()
            self.fail('Singleton Class: Passed the initialization unexpectedly.')
        except SingletonInitializationException:
            # Expected: an incompatible `_singleton_instance` attribute is rejected.
            pass
if __name__ == '__main__':
    # Run this module's test cases when executed directly from the command line.
    unittest.main()
Negative test for @singleton with class_reference provided for the constructor
Test if the target class is with null singleton attribute.
Test if the target class has already had an attribute `_singleton_instance` but it is not compatible.
Positive test for @singleton with parameters provided for the constructor
Positive test for @singleton_with(*args, **kwargs)
Test if the target class without a singleton attribute but using a decorator with primitive parameters.
Test if the target class without a singleton attribute.
Test for the type. Test if it is working. (case #1) Test if it is working. (case #n) Test for the type. Test if it is working. (case #1) Test if it is working. (case #n) Test for the type. Test if it is working. (case #1) Test if it is working. (case #n) Test if the dependency injection is working. Note that this test case shows the limitation of the decorator which can't take a class reference as a parameter. Strongly recommend to use @singleton_with as it is more powerful. Note that this test case shows the limitation of the decorator which can't take a class reference as a parameter. Strongly recommend to use @singleton_with as it is more powerful. Test for the type. Test if it is working. (case #1) Test if it is working. (case #n) Test if the dependency injection is working. Use `self.__class__` to call the parent class' constructor. Use `self.__class__` to call the parent class' constructor.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.