| text | lang | type | id |
|---|---|---|---|
from __future__ import print_function
import argparse
from six.moves import configparser
import logging
import logging.config
import sys
import six
from tabulate import tabulate
class BaseCommand(object):
"""Base class for command-line tools."""
default_config_section = 'caravan'
CHILD_POLICIES = [
'TERMINATE',
'REQUEST_CANCEL',
'ABANDON',
]
@classmethod
def main(cls):
"""Setuptools console-script entrypoint"""
cmd = cls()
cmd._parse_args()
cmd._setup_logging()
response = cmd._run()
output = cmd._handle_response(response)
if output is not None:
print(output)
def _parse_args(self):
args = sys.argv[1:]
# Config only parser
config_parser = argparse.ArgumentParser(description=self.description,
add_help=False)
config_parser.add_argument('-c', '--config',
help='config file for setup.')
config_parser.add_argument('--config-section',
default=self.default_config_section,
help='section of the config file for '
'setup.')
config_args, remaining_args = config_parser.parse_known_args()
# Full parser
parser = argparse.ArgumentParser(description=self.description,
parents=[config_parser])
self._setup_base_arguments(parser)
self.setup_arguments(parser)
# Read defaults from config file
if config_args.config:
cp = configparser.RawConfigParser()
with open(config_args.config) as fp:
cp.readfp(fp)
config_items = cp.items(config_args.config_section)
valid_options = [option_string
for action in parser._actions
for option_string in action.option_strings]
nargs_options = {option_string: action.nargs
for action in parser._actions
for option_string in action.option_strings}
for option, value in config_items:
option_string = '--%s' % option
if option_string in valid_options:
if nargs_options.get(option_string) == '+':
value = value.split()
option_args = [option_string] + value
else:
option_args = [option_string, value]
args.extend(option_args)
self.args = parser.parse_args(args)
def _setup_base_arguments(self, parser):
        parser.add_argument('--logging-config',
                            dest='logging_config',
                            help='Optional config file for logging. '
                                 'Defaults to the --config file.')
parser.add_argument('--verbose',
dest='logging_level',
default=logging.WARNING,
action='store_const',
const=logging.INFO)
parser.add_argument('--debug',
dest='logging_level',
default=logging.WARNING,
action='store_const',
const=logging.DEBUG)
def _setup_logging(self):
if self.args.logging_config:
logging.config.fileConfig(self.args.logging_config)
elif self.args.config:
try:
logging.config.fileConfig(self.args.config)
except (configparser.NoSectionError, KeyError):
pass
else:
logging.basicConfig(level=self.args.logging_level)
def _run(self):
logging.debug('Run command with args: %s', self.args)
try:
return self.run()
except KeyboardInterrupt:
sys.exit(1)
def _handle_response(self, response):
if response is None:
return "Success."
elif isinstance(response, six.string_types):
return response
elif isinstance(response, list):
if len(response) == 0:
return 'No results.'
if hasattr(self, 'formatter'):
response = map(self.formatter, response)
return tabulate(response, headers='keys', tablefmt="plain")
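# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal subclass wiring up description/setup_arguments/run, assuming the
# BaseCommand contract above; the ListDomains name and its option are invented.
class ListDomains(BaseCommand):
    description = 'List known domains.'

    def setup_arguments(self, parser):
        parser.add_argument('--status', default='REGISTERED')

    def run(self):
        # Returning a list of dicts lets _handle_response tabulate it.
        return [{'name': 'demo', 'status': self.args.status}]

# A setuptools console_scripts entry point would then map to ListDomains.main.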
| Python | CL | bc4638f8497b22dde4c33b1f498dd2eab4b5e0e14e077b1ed992871664a389b7 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
from azure.cli.core.azclierror import ValidationError
from knack.util import CLIError
from knack.log import get_logger
from ._constants import MIN_GA_VERSION, GA_CONTAINERAPP_EXTENSION_NAME
logger = get_logger(__name__)
def is_containerapp_extension_available():
from azure.cli.core.extension import (
ExtensionNotInstalledException, get_extension)
from packaging.version import parse
try:
ext = get_extension(GA_CONTAINERAPP_EXTENSION_NAME)
# Check extension version
if ext and parse(ext.version) < parse(MIN_GA_VERSION):
return False
except ExtensionNotInstalledException:
return False
return True
def _get_azext_containerapp_module(module_name):
try:
if not is_containerapp_extension_available():
            raise ValidationError(f"This command requires {GA_CONTAINERAPP_EXTENSION_NAME} version >= {MIN_GA_VERSION}. Run 'az extension add --upgrade -n {GA_CONTAINERAPP_EXTENSION_NAME}' to install it.")
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(GA_CONTAINERAPP_EXTENSION_NAME)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
        # Reload the preview extension's _help: importing the containerapp module overwrites it with containerapp's own _help.
from azext_containerapp_preview import _help
from importlib import reload
reload(_help)
return azext_custom
except ImportError as ie:
raise CLIError(ie) from ie
def auto_install_containerapp_extension_if_not_exist(cmd):
from azure.cli.core.extension import extension_exists
if not extension_exists(GA_CONTAINERAPP_EXTENSION_NAME):
_install_containerapp_extension(cmd, GA_CONTAINERAPP_EXTENSION_NAME)
def _install_containerapp_extension(cmd, extension_name, upgrade=False):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name, upgrade=upgrade)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
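# --- Hypothetical usage sketch (illustrative only) ---
# A command implementation might defer to the GA containerapp extension like
# this; the "azext_containerapp.custom" module path and the show_containerapp
# name are assumptions, not something stated in this file.
#
#   azext_custom = _get_azext_containerapp_module("azext_containerapp.custom")
#   return azext_custom.show_containerapp(cmd, resource_group_name, name)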
| Python | CL | cb98480a20208b94642cffeff349e2f4ce79643b2a44719737b532f8c30aa223 |
import torch
import torch.nn as nn
import torch.optim as optim
import os
from tqdm import tqdm
from torch.utils.data import DataLoader
from pytorch_pretrained_bert.modeling import BertModel
from src.EDU.dataset import RlatCollator, AugCollator
try:
torch.multiprocessing.set_start_method("spawn")
except RuntimeError:
pass
tag_to_ix_relation = {'causality': 0, 'coordination': 1, 'transition': 2, 'explanation': 3}
tag_to_ix_center = {'1': 0, '2': 1, '3': 2, '4': 3}
# tag_to_ix_center = {'1':0, '2':1, '3':2}
class NetRlat(nn.Module):
def __init__(self, embedding_dim, tagset_size_center, tagset_size_relation, batch_size):
super(NetRlat, self).__init__()
self.embedding_dim = embedding_dim # 768(bert)
self.tagset_size_center = tagset_size_center # center label size
self.tagset_size_relation = tagset_size_relation # relation label size
self.batch_size = batch_size
# EDU
# self.bert = BertModel.from_pretrained('bert-base-multilingual-cased').cpu()
self.bert = BertModel.from_pretrained('bert-base-chinese').cpu()
# freeze
# for param in self.bert.parameters():
# param.requires_grad = False
self.dropout = nn.Dropout(0.1)
self.hidden2tag_center = nn.Linear(embedding_dim * 2, self.tagset_size_center)
self.hidden2tag_relation = nn.Linear(embedding_dim * 2, self.tagset_size_relation)
self.logsoftmax = nn.LogSoftmax(dim=-1)
self.softmax = nn.Softmax(dim=-1)
def forward(self, input_ids1, input_ids2, aug_flag=None, input_mask1=None, input_mask2=None, labels=None):
'''
Args:
input_ids: [batch_size, seq_length]
'''
# use sliding window approach to deal with EDU length 512 restriction
# use narrow(dimension, start, length) to slice tensor
out_list = []
for input_ids in [input_ids1, input_ids2]:
input_ids_list = []
attention_mask_list = []
if input_ids.size()[1] > 512:
for i in range(0, input_ids.size()[1], 256): # step size : 256
step = 512 if (i + 512 <= input_ids.size()[1]) else input_ids.size()[1] - i
input_ids_list.append(input_ids.narrow(1, i, step))
# attention_mask_list.append(attention_mask.narrow(1, i, step))
# send to EDU sequentially
sequence_output_list = []
for idx in range(0, len(input_ids_list)):
# sequence_output, _ = self.bert(input_ids_list[idx], attention_mask_list[idx], output_all_encoded_layers=False)
sequence_output, _ = self.bert(input_ids_list[idx], output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
sequence_output_list.append(sequence_output)
                # combine the chunks, averaging the 256-token overlap between neighbouring windows
                sequence_output = []
                for i in range(0, len(sequence_output_list) - 1):
                    if i == 0:
                        sequence_output.append(sequence_output_list[i][:, :256, :])
                    sequence_output.append(
                        (sequence_output_list[i][:, 256:, :] + sequence_output_list[i + 1][:, :256, :]) / 2)
                # concatenate once, after the list of pieces is fully built
                sequence_output = torch.cat(sequence_output, 1)
else:
sequence_output, _ = self.bert(input_ids, output_all_encoded_layers=False)
sequence_output = self.dropout(sequence_output)
out_list.append(sequence_output)
pooled_list = []
for out in out_list:
pooled_list.append(out.max(1)[0])
pooled = torch.cat(pooled_list, 1)
# after combination, use a linear layer to reduce hidden dimension to tagset size
center = self.hidden2tag_center(pooled)
relation = self.hidden2tag_relation(pooled)
        if aug_flag is True:
            center = self.softmax(center)
            relation = self.softmax(relation)
        elif aug_flag is False:
            center = self.logsoftmax(center)
            relation = self.logsoftmax(relation)
        # aug_flag is None: return the raw logits
return center, relation
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=2, reduce=True):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.criterion = nn.CrossEntropyLoss(reduction='none').cpu()
self.reduce = reduce
def forward(self, inputs, targets):
CE_loss = self.criterion(inputs, targets)
pt = torch.exp(-CE_loss)
F_loss = self.alpha * (1 - pt) ** self.gamma * CE_loss
if self.reduce:
return torch.mean(F_loss)
else:
return F_loss
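# --- Hypothetical sanity check for FocalLoss (illustrative only) ---
# FocalLoss expects raw logits [batch, classes] and integer class targets.
# With gamma=0 and alpha=1 the modulating factor (1 - pt)**gamma is 1, so the
# result reduces to plain cross-entropy:
#   criterion = FocalLoss(alpha=1, gamma=0)
#   loss = criterion(torch.randn(4, 4), torch.tensor([0, 1, 2, 3]))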
class ModelRlat():
def __init__(self, train_data, test_data, aug_data, valid_data, embedding_dim, tagset_size_center,
tagset_size_relation, tagset_size_sub_label, batch_size, k_fold):
self.train_data = train_data
self.test_data = test_data
self.aug_data = aug_data
self.valid_data = valid_data
self.embedding_dim = embedding_dim # 768(bert)
self.tagset_size_center = tagset_size_center # center label size
self.tagset_size_relation = tagset_size_relation # relation label size
self.batch_size = batch_size
self.k_fold = k_fold
        self.model = NetRlat(self.embedding_dim, self.tagset_size_center, self.tagset_size_relation,
                             self.batch_size).cpu()
self.optimizer = optim.SGD(self.model.parameters(), lr=5e-5)
# self.optimizer = BertAdam(self.model.parameters(),lr= 1e-4)
self.ce_criterion = nn.CrossEntropyLoss().cpu() # delete ignore_index = 0
# self.ce_criterion = FocalLoss().cpu()
self.kl_criterion = nn.KLDivLoss().cpu()
# self.kl_criterion = nn.MSELoss().cpu()
def train(self):
# indices = list(range(len(self.train_data)))
# np.random.shuffle(indices)
# partitions = list(partition(indices, self.k_fold))
# train_idx = [idx for part in partitions[0:self.k_fold-1] for idx in part]
# valid_idx = partitions[self.k_fold-1]
# # randomly sample from only the indicies given
# train_sampler = SubsetRandomSampler(train_idx)
# valid_sampler = SubsetRandomSampler(valid_idx)
collate_fn_rlat = RlatCollator(train_edu=False, train_trans=False, train_rlat=True)
collate_fn_aug = AugCollator(train_trans=False, train_rlat=True)
train_data = DataLoader(
self.train_data,
batch_size=self.batch_size,
# sampler=train_sampler,
collate_fn=collate_fn_rlat
)
valid_data = DataLoader(
self.valid_data,
batch_size=self.batch_size,
# sampler=valid_sampler,
collate_fn=collate_fn_rlat
)
test_data = DataLoader(
self.test_data,
batch_size=self.batch_size,
collate_fn=collate_fn_rlat
)
aug_data = DataLoader(
self.aug_data,
batch_size=self.batch_size,
collate_fn=collate_fn_aug
)
for epoch in range(15):
running_loss1 = 0.0
running_loss2 = 0.0
running_loss3 = 0.0
running_loss4 = 0.0
self.model.train()
i = 0
trange = tqdm(enumerate(zip(train_data, aug_data)),
total=len(aug_data),
desc='rlat')
for step, (train, aug) in trange:
self.model.zero_grad()
self.optimizer.zero_grad()
# if step == 10:
# break
zh1, zh2, relation, center, sub_label = train[0], train[1], train[2], train[3], train[4]
en1, en2 = aug[0], aug[1]
aug_en1, aug_en2 = aug[4], aug[5]
zh1_torch = torch.tensor(zh1, dtype=torch.long).cpu()
zh2_torch = torch.tensor(zh2, dtype=torch.long).cpu()
relation_torch = torch.tensor([relation], dtype=torch.long).cpu()
center_torch = torch.tensor([center], dtype=torch.long).cpu()
en1_torch = torch.tensor(en1, dtype=torch.long).cpu()
en2_torch = torch.tensor(en2, dtype=torch.long).cpu()
aug_en1_torch = torch.tensor(aug_en1, dtype=torch.long).cpu()
aug_en2_torch = torch.tensor(aug_en2, dtype=torch.long).cpu()
center_zh, relation_zh = self.model(
zh1_torch.view(self.batch_size, -1),
zh2_torch.view(self.batch_size, -1),
)
center_en, relation_en = self.model(
en1_torch.view(self.batch_size, -1),
en2_torch.view(self.batch_size, -1),
aug_flag=False,
)
center_aug_en, relation_aug_en = self.model(
aug_en1_torch.view(self.batch_size, -1),
aug_en2_torch.view(self.batch_size, -1),
aug_flag=True,
)
# supervised cross-entropy loss
ce_relation_loss = self.ce_criterion(
relation_zh.view(self.batch_size, self.model.tagset_size_relation),
relation_torch.view(self.batch_size)
)
ce_center_loss = self.ce_criterion(
center_zh.view(self.batch_size, self.model.tagset_size_center),
center_torch.view(self.batch_size)
)
# unsupervised consistency loss (kl-divergence)
kl_relation_loss = self.kl_criterion(
relation_en.view(self.batch_size, self.model.tagset_size_relation),
relation_aug_en.view(self.batch_size, self.model.tagset_size_relation)
)
kl_center_loss = self.kl_criterion(
center_en.view(self.batch_size, self.model.tagset_size_center),
center_aug_en.view(self.batch_size, self.model.tagset_size_center)
)
# Training Signal Annealing
center_loss = 0.0
relation_loss = 0.0
# center_thresold = (1 - math.exp(-( (step+1) / len(train_data))*5)) * (1 - 1 / self.model.tagset_size_center) + (1 / self.model.tagset_size_center)
# relation_thresold = (1 - math.exp(-( (step+1) / len(train_data))*5)) * (1 - 1 / self.model.tagset_size_relation) + (1 / self.model.tagset_size_relation)
# if center_zh[:, center].item() > center_thresold:
# center_loss = kl_center_loss
# else:
# center_loss = ce_center_loss + kl_center_loss
# if relation_zh[:, relation].item() > relation_thresold:
# relation_loss = kl_relation_loss
# else:
# relation_loss = ce_relation_loss + kl_relation_loss
center_loss = ce_center_loss + 10 * kl_center_loss
relation_loss = ce_relation_loss + 10 * kl_relation_loss
loss = []
loss.append(center_loss)
loss.append(relation_loss)
gradients = [torch.tensor(1.0).cpu() for _ in range(len(loss))]
torch.autograd.backward(loss, gradients)
# loss = center_loss + relation_loss
# loss.backward()
self.optimizer.step()
running_loss1 += ce_center_loss.item()
running_loss2 += kl_center_loss.item()
running_loss3 += ce_relation_loss.item()
running_loss4 += kl_relation_loss.item()
trange.set_postfix(
{'ce_c': '{0:1.5f}'.format(running_loss1 / (step + 1)),
'kl_c': '{0:1.5f}'.format(running_loss2 / (step + 1)),
'ce_s': '{0:1.5f}'.format(running_loss3 / (step + 1)),
'kl_s': '{0:1.5f}'.format(running_loss4 / (step + 1))
}
)
print("\n")
print('[%d] loss of ce_center: %.5f' %
(epoch + 1, running_loss1 * self.batch_size / len(train_data)))
print('[%d] loss of kl_center: %.5f' %
(epoch + 1, running_loss2 * self.batch_size / len(train_data)))
print('[%d] loss of ce_relation: %.5f' %
(epoch + 1, running_loss3 * self.batch_size / len(train_data)))
print('[%d] loss of kl_relation: %.5f' %
(epoch + 1, running_loss4 * self.batch_size / len(train_data)))
# running_loss1 += loss[0].item()
# running_loss2 += loss[1].item()
# trange.set_postfix(
# {'center_loss' : '{0:1.5f}'.format(running_loss1 / (step + 1)),
# 'relation_loss' : '{0:1.5f}'.format(running_loss2 / (step + 1))
# }
# )
# print("\n")
# print('[%d] loss of center: %.5f' %
# (epoch + 1, running_loss1 * self.batch_size / len(train_data)))
# print('[%d] loss of relation: %.5f' %
# (epoch + 1, running_loss2 * self.batch_size / len(train_data)))
with torch.no_grad():
train_acc = self.test_accuracy("train", train_data)
with torch.no_grad():
valid_acc = self.test_accuracy("valid", valid_data)
torch.save(self.model.state_dict(), 'saved_model/model_rlat.pkl.{}'.format(epoch + 1))
with torch.no_grad():
test_acc = self.test_accuracy("test", test_data)
def test(self):
self.model.load_state_dict(torch.load("saved_model/pretrained_rlat.pkl")) # load pretrained model
self.model.eval()
collate_fn_rlat = RlatCollator(train_edu=False, train_trans=False, train_rlat=True)
test_data = DataLoader(
self.test_data,
batch_size=self.batch_size,
collate_fn=collate_fn_rlat
)
# with torch.no_grad():
# self.test_accuracy("train", train_data)
# with torch.no_grad():
# self.test_accuracy("valid", valid_data)
with torch.no_grad():
test_acc = self.test_accuracy("test", test_data)
def test_accuracy(self, phase, data):
l0 = l1 = l2 = l3 = 0
t0 = t1 = t2 = t3 = 0
c_l0 = c_l1 = c_l2 = c_l3 = 0
c_t0 = c_t1 = c_t2 = c_t3 = 0
total = n_corrects = n_wrongs = count = 0
causality2explanation = explanation2causality = 0
trange = tqdm(enumerate(data),
total=len(data),
desc=phase)
self.model.eval()
for step, (sent1, sent2, relation, center, _) in trange:
# if step == 10:
# break
sent1_torch = torch.tensor(sent1, dtype=torch.long).cpu()
sent2_torch = torch.tensor(sent2, dtype=torch.long).cpu()
# mask1_torch = torch.tensor(mask1,dtype=torch.long).cpu()
# mask2_torch = torch.tensor(mask2,dtype=torch.long).cpu()
relation_torch = torch.tensor([relation], dtype=torch.long).cpu()
center_torch = torch.tensor([center], dtype=torch.long).cpu()
center, relation = self.model(sent1_torch.view(self.batch_size, -1), sent2_torch.view(self.batch_size, -1))
max_score_relation, relation_idx = torch.max(relation, 1)
max_score_center, center_idx = torch.max(center, 1)
for j in range(0, len(relation_idx)):
if relation_idx[j] == relation_torch.view(-1)[j] and center_idx[j] == center_torch.view(-1)[j]:
n_corrects += 1
else:
n_wrongs += 1
total += len(relation_idx)
for j in range(0, len(relation_idx)):
if relation_idx[j] == 0:
t0 += 1
if relation_idx[j] == 1:
t1 += 1
if relation_idx[j] == 2:
t2 += 1
if relation_idx[j] == 3:
t3 += 1
for j in range(0, len(relation_idx)):
if relation_torch.view(-1)[j] == 0:
l0 += 1
if relation_torch.view(-1)[j] == 1:
l1 += 1
if relation_torch.view(-1)[j] == 2:
l2 += 1
if relation_torch.view(-1)[j] == 3:
l3 += 1
for j in range(0, len(center_idx)):
if center_idx[j] == 0:
c_t0 += 1
if center_idx[j] == 1:
c_t1 += 1
if center_idx[j] == 2:
c_t2 += 1
if center_idx[j] == 3:
c_t3 += 1
for j in range(0, len(center_idx)):
if center_torch.view(-1)[j] == 0:
c_l0 += 1
if center_torch.view(-1)[j] == 1:
c_l1 += 1
if center_torch.view(-1)[j] == 2:
c_l2 += 1
if center_torch.view(-1)[j] == 3:
c_l3 += 1
for j in range(0, len(relation_idx)):
if relation_idx[j] == 0 and relation_torch.view(-1)[j] == 3:
causality2explanation += 1
if relation_idx[j] == 3 and relation_torch.view(-1)[j] == 1:
explanation2causality += 1
print("\n")
print('causality = ', t0, " ans = ", l0)
print('coordination = ', t1, " ans = ", l1)
print('transition = ', t2, " ans = ", l2)
print('explanation = ', t3, " ans = ", l3)
print('front = ', c_t0, " ans = ", c_l0)
print('back = ', c_t1, " ans = ", c_l1)
print('equal = ', c_t2, " ans = ", c_l2)
print('multi = ', c_t3, " ans = ", c_l3)
print('causality2explanation = ', causality2explanation)
print('explanation2causality = ', explanation2causality)
print("\n")
print(total, " ", n_corrects, " ", n_wrongs)
acc = float(n_corrects) / float(total)
acc *= 100
print("the accuracy of " + phase + " data is: ", acc, "%")
return acc
| Python | CL | 6c5433b10a210ccc01aa739a7a0909d31f9fa9ac48a52c12fadb86ce29cc74d5 |
import sys
from enum import Enum, auto
from collections import namedtuple
from antlr4 import CommonTokenStream, ParseTreeWalker, FileStream, InputStream
if __name__ is not None and "." in __name__:
from .SPLListener import SPLListener
from .SPLParser import SPLParser
from .SPLLexer import SPLLexer
else:
from SPLParser import SPLParser
from SPLLexer import SPLLexer
from SPLListener import SPLListener
class CMD(Enum):
SEARCH = auto()
REPLACE = auto()
STATS = auto()
class SEARCH_KIND(Enum):
FULLTEXT = auto()
CMP = auto()
IN = auto()
class CMP_OP(Enum):
EQ = auto()
NEQ = auto()
LE = auto()
LT = auto()
GE = auto()
GT = auto()
class LOGICAL_OP(Enum):
AND = auto()
OR = auto()
NOT = auto()
FullTextSearch = namedtuple('FullTextSearch', 'is_leaf kind text')
FieldCmpSearch = namedtuple('FieldCmpSearch', 'is_leaf kind field op value')
FieldInSearch = namedtuple('FieldInSearch', 'is_leaf kind field values')
# Connector for And / Or / Not
# Not is a unary operator: by convention `left` carries the operand and `right` is None
LogicalJointNode = namedtuple('LogicalJointNode', 'is_leaf op left right')
class Search:
cid = CMD.SEARCH
def __init__(self):
self.logical_tree = None
def _str_walk(self, node, depth):
if node is None:
return
indent = ' ' * depth
if node.is_leaf:
return indent + str(node)
else:
left = self._str_walk(node.left, depth+1)
right = self._str_walk(node.right, depth+1)
ret = f"{indent}({node.op}\n{left}\n"
            # Not has no right branch
if right is not None:
ret += right + '\n'
ret += indent + ')'
return ret
def __str__(self):
ret = self._str_walk(self.logical_tree, 2)
if ret is None:
ret = ''
return f"<{self.__class__.__name__}>\n" + ret
class Replace:
cid = CMD.REPLACE
def __init__(self):
self.in_fields = None
self.with_ops = None
def __str__(self):
return (
f"<{self.__class__.__name__}>\n"
f" with_ops: {self.with_ops}\n"
f" in_fields: {self.in_fields}"
)
StatsAggTerm = namedtuple('StatsAggTerm', 'func func_field as_field')
class Stats:
cid = CMD.STATS
def __init__(self):
self.stats_agg_terms = None
self.by_fields = None
def __str__(self):
padding = '\n' + ' '*8
return (
f"<{self.__class__.__name__}>\n"
f" agg_terms:{padding}"
f"{padding.join(str(t) for t in self.stats_agg_terms)}\n"
f" by_fields: {self.by_fields}"
)
class Pipeliner(SPLListener):
def __init__(self):
self._cmds = []
@property
def cmds(self):
return self._cmds
def exitCmd(self, ctx: SPLParser.CmdContext):
ctx.ret = ctx.getChild(0).ret
def exitPipeline(self, ctx: SPLParser.PipelineContext):
self._cmds.append(ctx.headSearch().ret)
self._cmds.extend(c.ret for c in ctx.cmd())
# -------------------
    # Basic nodes
# -------------------
def exitCmpOp(self, ctx: SPLParser.CmpOpContext):
token_type = ctx.getChild(0).symbol.type
if token_type == SPLLexer.Eq:
ctx.ret = CMP_OP.EQ
elif token_type == SPLLexer.Le:
ctx.ret = CMP_OP.LE
elif token_type == SPLLexer.Lt:
ctx.ret = CMP_OP.LT
elif token_type == SPLLexer.Gt:
ctx.ret = CMP_OP.GT
elif token_type == SPLLexer.Ge:
ctx.ret = CMP_OP.GE
else:
ctx.ret = CMP_OP.NEQ
    # The current grammar has no wc-field; only field is used
def exitField(self, ctx: SPLParser.FieldContext):
        # field is currently an UnquotedValue without quotes, so no processing is needed
ctx.ret = ctx.getText()
def exitFieldList(self, ctx: SPLParser.FieldListContext):
        # Collect only FieldContext children; the COMMAs are not needed
ctx.ret = [child.ret for child in ctx.getChildren(
lambda x: isinstance(x, SPLParser.FieldContext)
)]
def exitValue(self, ctx: SPLParser.ValueContext):
text = ctx.getText()
        # Strip the quotes
if ctx.getChild(0).symbol.type == SPLLexer.QuotedString:
text = text[1:-1]
ctx.ret = text
def exitWcValue(self, ctx: SPLParser.WcValueContext):
if (child := ctx.value()):
ctx.ret = child.ret
elif ctx.getChild(0).symbol.type == SPLLexer.WcQuotedString:
ctx.ret = ctx.getText()[1:-1]
else:
ctx.ret = ctx.getText()
# -------------------
# Search
# -------------------
def exitFullTextSearch(self, ctx: SPLParser.FullTextSearchContext):
ctx.ret = FullTextSearch(is_leaf=True,
kind=SEARCH_KIND.FULLTEXT,
text=ctx.wcValue().ret)
def exitFieldCmpSearch(self, ctx: SPLParser.FieldCmpSearchContext):
ctx.ret = FieldCmpSearch(is_leaf=True,
kind=SEARCH_KIND.CMP,
field=ctx.field().ret,
op=ctx.cmpOp().ret,
value=ctx.wcValue().ret)
def exitParenSearch(self, ctx: SPLParser.ParenSearchContext):
ctx.ret = ctx.searchExpr().ret
def exitAndSearch(self, ctx: SPLParser.AndSearchContext):
ctx.ret = LogicalJointNode(is_leaf=False,
op=LOGICAL_OP.AND,
left=ctx.left.ret,
right=ctx.right.ret)
def exitOrSearch(self, ctx: SPLParser.OrSearchContext):
ctx.ret = LogicalJointNode(is_leaf=False,
op=LOGICAL_OP.OR,
left=ctx.left.ret,
right=ctx.right.ret)
def exitNotSearch(self, ctx: SPLParser.NotSearchContext):
ctx.ret = LogicalJointNode(is_leaf=False,
op=LOGICAL_OP.NOT,
left=ctx.left.ret,
right=None)
def exitSearch(self, ctx: SPLParser.SearchContext):
search = Search()
if (child := ctx.searchExpr()):
search.logical_tree = child.ret
ctx.ret = search
def exitHeadSearch(self, ctx: SPLParser.HeadSearchContext):
search = Search()
if (child := ctx.searchExpr()):
search.logical_tree = child.ret
ctx.ret = search
# -------------------
# Replace
# -------------------
def exitReplaceExpr(self, ctx: SPLParser.ReplaceExprContext):
ctx.ret = (ctx.old_val.ret, ctx.new_val.ret)
def exitReplaceExprList(self, ctx: SPLParser.ReplaceExprListContext):
        # Collect only ReplaceExprContext children; the COMMAs are not needed
ctx.ret = [child.ret for child in ctx.getChildren(
lambda x: isinstance(x, SPLParser.ReplaceExprContext)
)]
def exitReplace(self, ctx: SPLParser.ReplaceContext):
replace = Replace()
replace.with_ops = ctx.replaceExprList().ret
if (field_list := ctx.fieldList()):
replace.in_fields = field_list.ret
ctx.ret = replace
# -------------------
# Stats
# -------------------
def exitStatsFunc(self, ctx: SPLParser.StatsFuncContext):
ctx.ret = ctx.getText()
def exitStatsAggTerm(self, ctx: SPLParser.StatsAggTermContext):
func = ctx.statsFunc().ret
func_field = ctx.func_field.ret
as_field = ctx.as_field.ret if ctx.as_field else None
term = StatsAggTerm(func, func_field, as_field)
ctx.ret = term
def exitStatsAggTermList(self, ctx: SPLParser.StatsAggTermListContext):
ctx.ret = [child.ret for child in ctx.getChildren(
lambda x: isinstance(x, SPLParser.StatsAggTermContext)
)]
def exitStats(self, ctx: SPLParser.StatsContext):
stats = Stats()
stats.stats_agg_terms = ctx.statsAggTermList().ret
if (field_list := ctx.fieldList()):
stats.by_fields = field_list.ret
ctx.ret = stats
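# --- Hypothetical example (illustrative only) ---
# For an input pipeline such as
#   error OR status=500 | stats count(host) by region
# the walker would yield a head Search whose logical_tree is a
# LogicalJointNode(op=LOGICAL_OP.OR, ...) over two leaf searches, followed by
# a Stats with one StatsAggTerm('count', 'host', None). The exact surface
# syntax depends on the SPL grammar, which is not shown here.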
if __name__ == '__main__':
if len(sys.argv) > 1:
input_stream = FileStream(sys.argv[1], encoding='utf8')
else:
input_stream = InputStream(sys.stdin.read())
lexer = SPLLexer(input_stream)
token_stream = CommonTokenStream(lexer)
parser = SPLParser(token_stream)
tree = parser.pipeline()
walker = ParseTreeWalker()
pipeliner = Pipeliner()
walker.walk(pipeliner, tree)
for c in pipeliner.cmds:
print(c)
| Python | CL | 914584c8fd5aadda7fcab268b0f9b1b69cb6b6d7e75ec4da85d96eabeedcbc75 |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import socket
import ssl
from typing import Union, Tuple, Optional
from .connection import TcpConnection, tcpConnectionTypes, TcpConnectionUninitializedException
class TcpClientConnection(TcpConnection):
"""An accepted client connection request."""
def __init__(self,
conn: Union[ssl.SSLSocket, socket.socket],
addr: Tuple[str, int]):
super().__init__(tcpConnectionTypes.CLIENT)
self._conn: Optional[Union[ssl.SSLSocket, socket.socket]] = conn
self.addr: Tuple[str, int] = addr
@property
def connection(self) -> Union[ssl.SSLSocket, socket.socket]:
if self._conn is None:
raise TcpConnectionUninitializedException()
return self._conn
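# --- Hypothetical usage sketch (illustrative only) ---
# An acceptor loop would typically wrap each accepted socket like this:
#   conn, addr = server_socket.accept()
#   client = TcpClientConnection(conn, addr)
#   data = client.connection.recv(1024)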
| Python | CL | da52e977a1a9c6c0de4c529952ae506e50cd9e77bb22f56a22729869462a2c33 |
"""
D365API.TestAccess
~~~~~~~~~~~~~~~~~~
"""
import json
import os
import unittest
from D365API.Access import Access
class TestAccess(unittest.TestCase):
"""Test the Access module."""
@classmethod
def setUpClass(cls):
"""Prepare test class.
Get the Test Data from JSON (JavaScript Object Notation) file.
"""
# Get the current directory of the file
current_directory = os.path.dirname(os.path.abspath(__file__))
# Get the path of the test data file
test_access_file = os.path.join(current_directory, "TestData.json")
# Open the file for reading
with open(test_access_file, "r") as f:
cls._data = json.load(f)
# Get the hostname
cls._hostname = cls._data["organizations"]["name"]
def test_login_rest_v1_failure(self):
"""Test a failure of REST (REpresentational State Transfer) login method.
Get the failure username and password (user_rest_failure) from the
Test Data file and login. Should result in login method returning None
value.
"""
# Get the user data for success login
user_rest_v1_failure = self._data["systemusers"]["user_rest_v1_failure"]
# Create an instance of Access object and login
access = Access(hostname=self._hostname,
client_id=user_rest_v1_failure["client_id"],
client_secret=user_rest_v1_failure["client_secret"],
tenant_id=user_rest_v1_failure["tenant_id"]).login()
# Test to ensure access is not a string
self.assertNotEqual(type(access), str)
def test_login_rest_v1_success(self):
"""Test a success of REST (REpresentational State Transfer) login method.
Get the success username and password (user_rest_v1_success) from the
Test Data file and login. Should result in login method returning an
access token.
"""
# Get the user data for success login
user_rest_v1_success = self._data["systemusers"]["user_rest_v1_success"]
# Create an instance of Access object and login
access = Access(hostname=self._hostname,
client_id=user_rest_v1_success["client_id"],
client_secret=user_rest_v1_success["client_secret"],
tenant_id=user_rest_v1_success["tenant_id"]).login()
# Test to ensure access is a string
self.assertEqual(type(access), str)
if __name__ == "__main__":
unittest.main()
| Python | CL | 026bd71f764adecd2b105306fa133ee772b3f8f58b8d2a854ab79346fa4a8d99 |
"""
@description: 匹配接口
@author: Wu Jiang-Heng
@email: jiangh_wu@163.com
@time: 2019-05-29
@version: 0.0.1
"""
from os.path import join, exists
from os import mkdir
import pickle
import yaml
import logging
from time import time
from sementic_server.source.intent_extraction.recognizer import Recognizer
from sementic_server.source.intent_extraction.system_info import SystemInfo
from sementic_server.source.intent_extraction.logger import construt_log, get_logger
server_logger = logging.getLogger("server_log")
UNLABEL = 'UNLABEL'
def load_actree(pkl_path):
"""
加载已保存的Recognizer实例
:param pkl_path:
:return:ac自动机对象
"""
start = time()
with open(pkl_path, "rb") as f:
reg = pickle.load(f)
server_logger.info(f"Loading AC-Tree \"{pkl_path.split('/')[-1]}\", time used: {time() - start}")
return reg
def build_actree(dict_info, pkl_path):
"""
创建Recognizer实例
:param pkl_path:
:return:ac自动机对象
"""
start = time()
reg = Recognizer(dict_info)
pickle.dump(reg, open(pkl_path, "wb"))
server_logger.info(f"Building AC-Tree \"{pkl_path.split('/')[-1]}\", time used: {time() - start}")
return reg
def _resolve_list_conflict(raw_list, ban_list):
    """
    Resolve conflicts between raw_list and ban_list.
    :param raw_list: items to be filtered
    :param ban_list: position indices that must not be overlapped
    :return: the items of raw_list that avoid every banned span
    """
if ban_list == list():
return raw_list
res_list = []
index_ban = set()
for ban in ban_list:
if type(ban) in {list, tuple}:
index_ban.update(list(range(ban[0], ban[1])))
else:
index_ban.update(list(range(ban["begin"], ban["end"])))
for item in raw_list:
if type(item) in {list, tuple}:
item_range = list(range(item[0], item[1]))
else:
item_range = list(range(item["begin"], item["end"]))
if index_ban.intersection(item_range) == set():
res_list.append(item)
return res_list
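# Worked example (illustrative): with raw_list = [(0, 2), (3, 5)] and
# ban_list = [(0, 1)], the banned index set is {0}; (0, 2) covers {0, 1} and
# overlaps it, so only (3, 5) survives.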
def replace_items_in_sentence(sentence, items):
    """
    Replace the spans of the sentence covered by the items.
    :param sentence: original sentence
    :param items: items to substitute, e.g. ((begin, end, value), ...)
    :return: the rewritten sentence
    """
size = len(items)
if size < 1:
return sentence
sentence_after_replace = ""
index = 0
    for position, char in enumerate(sentence):
        if position == items[index][0]:
            sentence_after_replace += items[index][2]
        elif position not in range(items[index][0] + 1, items[index][1]):
            sentence_after_replace += char
        if position == items[index][1] - 1 and index < size - 1:
            index += 1
return sentence_after_replace
def _update_account_in_sentence(accounts: list, sentence: str):
    """
    Update each account's position within the (possibly corrected) sentence.
    :param accounts: account items with begin/end/value fields
    :param sentence: the sentence to search in
    :return:
    """
    for index, info in enumerate(accounts):
        begin = sentence.find(info["value"])
        if begin != info["begin"]:
            accounts[index]["begin"] = begin
            accounts[index]["end"] = begin + len(info["value"])
class ItemMatcher(object):
"""
@description: 匹配接口类
@author: Wu Jiang-Heng
@email: jiangh_wu@163.com
@time: 2019-05-29
@version: 0.0.1
"""
def __init__(self, new_actree=False):
        self.reg = None   # recognition AC automaton
        self.corr = None  # correction AC automaton
si = SystemInfo()
self.correct_logger = get_logger("correction", si.log_path_corr)
        # locate the project root directory
self.dir_data = join(si.base_path, "data")
self.dir_yml = join(self.dir_data, "yml")
self.dir_output = join(si.base_path, "output")
self.dir_pkl = join(self.dir_output, "pkl")
        # load the type dictionaries for relation words and question words, plus the correction dictionary
self.relations, self.relation_code, self.ques_word, self.wrong_word = dict(), dict(), dict(), dict()
try:
self.relations = yaml.load(open(join(self.dir_yml, "relation.yml"),
encoding="utf-8"), Loader=yaml.SafeLoader)
self.relation_code = yaml.load(open(join(self.dir_yml, "relation_code.yml"),
encoding="utf-8"), Loader=yaml.SafeLoader)
self.ques_word = yaml.load(open(join(self.dir_yml, "quesword.yml"),
encoding="utf-8"), Loader=yaml.SafeLoader)
self.wrong_word = yaml.load(open(join(self.dir_yml, "wrong_table.yml"),
encoding="utf-8"), Loader=yaml.SafeLoader)
except FileNotFoundError as e:
server_logger.error(f"Cannot find the file in {self.dir_yml}, {e}")
self.reg_dict = self.relations.copy()
self.reg_dict.update(self.ques_word)
# actree
if not exists(self.dir_pkl):
mkdir(self.dir_pkl)
self.path_corr = join(self.dir_pkl, "corr.pkl")
self.path_reg = join(self.dir_pkl, "reg.pkl")
if new_actree:
self.corr = build_actree(dict_info=self.wrong_word, pkl_path=self.path_corr)
self.reg = build_actree(dict_info=self.reg_dict, pkl_path=self.path_reg)
else:
if not exists(self.path_corr):
self.corr = build_actree(dict_info=self.wrong_word, pkl_path=self.path_corr)
else:
self.corr = load_actree(pkl_path=self.path_corr)
if not exists(self.path_reg):
self.reg = build_actree(dict_info=self.reg_dict, pkl_path=self.path_reg)
else:
self.reg = load_actree(pkl_path=self.path_reg)
del self.wrong_word
del si
def correct(self, query, ban_list=None):
"""
纠错函数,能够过滤账号识别的中的账号
:param query: 原始查询
:param ban_list: 应当屏蔽的位置
:return: 纠错列表
"""
start_time = time()
res_correction = {"correct_query": query, "correct": []}
query4type = self.corr.query4type(query.lower())
        # drop corrections that conflict with account recognition
if ban_list is not None:
            query4type = _resolve_list_conflict(query4type, ban_list)
record = []
        # the match ran on the lowercased query; restore the original-case values
for item in query4type:
item["value"] = query[item["begin"]: item["end"]]
res_correction["correct"].append(item)
record.append((item['begin'], item['end'], item['type']))
res_correction["correct_query"] = replace_items_in_sentence(query, record)
self.correct_logger.info(
f"{construt_log(raw_query=query, correct_info=res_correction, using_time=time()-start_time)}")
server_logger.info(f"Correcting the query time used: {time()-start_time}")
return res_correction
def match(self, query: str, need_correct=True, accounts_info=None):
"""
:param query: 用户的原始查询
:param need_correct: 是否需要纠错
:return: 纠错、关系识别的结果
:param accounts_info:
语义解析模块只需要关注'query'字段的结果。
"""
        # collect all recognized accounts
accounts_info = accounts_info if accounts_info is not None else {}
accounts = accounts_info.get("accounts", [])
res = {
"relation": [],
"intent": None,
"raw_query": query,
"query": query, # 如果有纠错之后的查询,则为纠错之后的查询,否则为原句
"is_corr": need_correct,
"correct_info": None,
"accounts": accounts
}
if need_correct:
            # collect spans that already carry a label other than UNLABEL
            labelled_list = [(account['begin'], account['end']) for account in accounts
                             if account['type'] != UNLABEL]
            correct_info = self.correct(query, labelled_list)  # run correction
            res["correct_info"] = correct_info
res["query"] = res["correct_info"]["correct_query"]
res["accounts"] = \
                _resolve_list_conflict(res["accounts"], res["correct_info"]["correct"])
_update_account_in_sentence(res["accounts"], res["query"])
        for item in self.reg.query4type(res["query"]):  # find relation words and question words in the query
if item["type"] in self.relations.keys():
item["code"] = self.relation_code[item["type"]]
res["relation"].append(item)
elif item["type"] in self.ques_word:
if res["intent"] is not None and res["intent"] != item["type"]:
res["intent"] = 'conflict' # 冲突
else:
res["intent"] = item["type"]
return res
if __name__ == '__main__':
from pprint import pprint
i = "李帅的laopo麻麻在哪shang班?"
from sementic_server.source.intent_extraction.dict_builder import build_wrong_table
build_wrong_table()
im = ItemMatcher(new_actree=True)
pprint(im.match(i))
while True:
i = input()
pprint(im.match(i))
| Python | CL | 62c7045cb54bffe25d3a6cdb8e48121d9302767b4e8c1ae4510668fc40234776 |
import os
import time
from abc import ABC, abstractmethod
from common.commands import Compress, Upload, SendMsg, Download, Decompress
from common.configuration import AWSPathManager
from common.protocol import IOTask, AWSMsg, AWSIDRegistration
from common.resources import Folder, File, OSPath
from multipledispatch import dispatch
class Issuer(ABC):
@abstractmethod
def issue(self, task: AWSMsg):
pass
class AWSIssuer(Issuer):
def __init__(self, aws_path_manager: AWSPathManager):
self._aws_path_manager = aws_path_manager
@staticmethod
def dependencies(task: IOTask):
deps = []
cwd = Folder.cwd()
deps.extend(map(lambda f: cwd.relative(f),
map(lambda p: OSPath.new(p), filter(lambda arg: os.path.exists(arg), task.command.shell))))
deps.extend(map(lambda f: cwd.relative(f), task.command.deps))
return deps
def _operands(self, task: IOTask):
resources = Compress(task.workspace.input, *AWSIssuer.dependencies(task)).execute()
uploaded = Upload(self._aws_path_manager.server_path, self._aws_path_manager.bucket_path, resources).execute()
# Echo status back to user.
print("Resources {0} is transfered\n".format(uploaded.path))
time.sleep(1)
def _operator(self, task: IOTask):
return SendMsg(self._aws_path_manager.server_path, self._aws_path_manager.taskq_path, task).execute()
def _clean_files(self, task: IOTask):
if os.path.exists(task.workspace.local_input):
os.remove(task.workspace.local_input)
if os.path.exists(task.workspace.local_output):
os.remove(task.workspace.local_output)
def _output(self, task: IOTask):
retrieved = Download(self._aws_path_manager.server_path, self._aws_path_manager.bucket_path, task.workspace.output,
task.command.timeout).execute()
if retrieved:
cwd = Folder(os.path.normpath(os.getcwd()))
# files to extract
stdout_report = File('stdout')
stderr_report = File('stderr')
target = Decompress(cwd, retrieved, stdout_report, stderr_report).execute()
# report
File.new(target.relative(stdout_report)).content(header=" STDOUT ")
File.new(target.relative(stderr_report)).content(header=" STDERR ")
#
if task.perf_file:
Decompress(task.lwd.relative(task.workspace.root).create(), task.workspace.local_input).execute()
print("Task executed successfully")
else:
print("failed to retrieve, re-submit the job!!!")
self._clean_files(task)
@dispatch(IOTask)
def issue(self, task):
self._operands(task)
self._operator(task)
self._output(task)
@dispatch(AWSIDRegistration)
def issue(self, reg):
SendMsg(self._aws_path_manager.server_path, self._aws_path_manager.regq_path, reg, True).execute()
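# --- Hypothetical usage sketch (illustrative only) ---
# multipledispatch routes issue() by argument type:
#   issuer = AWSIssuer(path_manager)
#   issuer.issue(io_task)        # IOTask overload: upload, send, download
#   issuer.issue(registration)   # AWSIDRegistration overload: SendMsg only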
| Python | CL | d1d17c1adaa74ff623fa2fc899023297b0a0f9e0df758c828355e0e16ae887e4 |
#!/usr/bin/env python2
import json
from SidechainTestFramework.sc_boostrap_info import SCNodeConfiguration, SCCreationInfo, MCConnectionInfo, \
SCNetworkConfiguration
from SidechainTestFramework.sc_test_framework import SidechainTestFramework
from test_framework.util import assert_equal, initialize_chain_clean, start_nodes, \
websocket_port_by_mc_node_index, connect_nodes_bi, assert_true, assert_false
from SidechainTestFramework.scutil import check_box_balance, connect_sc_nodes, \
bootstrap_sidechain_nodes, start_sc_nodes, is_mainchain_block_included_in_sc_block, generate_next_blocks, \
check_mainchain_block_reference_info, check_wallet_balance
"""
Check the websocket connection between sidechain and mainchain nodes.
Configuration: start 2 mainchain nodes and 2 sidechain nodes (with default websocket configuration) connected,
respectively, to the first and second mainchain node. The mainchain nodes are not connected to each other,
nor are the sidechain nodes. The sidechain is bootstrapped from MC node 1.
Test:
- verify genesis information for SC node 1 and 2
- MC 1 mine a new block
- SC node 1 forges 1 SC block
- verify the block is included inside SC nodes 1
- verify the block is NOT included inside SC node 2
- connect MC 1 to MC 2
- connect SC 1 to SC 2
- verify the block is included inside SC node 2
"""
class MCSCConnectedNodes(SidechainTestFramework):
number_of_mc_nodes = 2
number_of_sidechain_nodes = 2
    sc_nodes_bootstrap_info = None
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, self.number_of_mc_nodes)
def setup_nodes(self):
return start_nodes(self.number_of_mc_nodes, self.options.tmpdir)
def sc_setup_chain(self):
mc_node_1 = self.nodes[0]
mc_node_2 = self.nodes[1]
sc_node_1_configuration = SCNodeConfiguration(
MCConnectionInfo(address="ws://{0}:{1}".format(mc_node_1.hostname, websocket_port_by_mc_node_index(0)))
)
sc_node_2_configuration = SCNodeConfiguration(
MCConnectionInfo(address="ws://{0}:{1}".format(mc_node_2.hostname, websocket_port_by_mc_node_index(1)))
)
network = SCNetworkConfiguration(SCCreationInfo(mc_node_1, 600, 1000),
sc_node_1_configuration, sc_node_2_configuration)
self.sc_nodes_bootstrap_info = bootstrap_sidechain_nodes(self.options.tmpdir, network)
def sc_setup_nodes(self):
return start_sc_nodes(self.number_of_sidechain_nodes, self.options.tmpdir)
def run_test(self):
mc_nodes = self.nodes
sc_nodes = self.sc_nodes
print "Number of started mc nodes: {0}".format(len(mc_nodes), "The number of MC nodes is not {0}.".format(self.number_of_mc_nodes))
print "Number of started sc nodes: {0}".format(len(sc_nodes), "The number of SC nodes is not {0}.".format(self.number_of_sidechain_nodes))
first_mainchain_node = mc_nodes[0]
second_mainchain_node = mc_nodes[1]
first_sidechain_node = sc_nodes[0]
second_sidechain_node = sc_nodes[1]
wallet_balance = self.sc_nodes_bootstrap_info.genesis_account_balance
genesis_account = self.sc_nodes_bootstrap_info.genesis_account
mainchain_block_height = self.sc_nodes_bootstrap_info.mainchain_block_height
first_mainchain_node_block = first_mainchain_node.getblock(str(mainchain_block_height))
# verify genesis information for SC node 1 and 2
# verify the mc block is included inside SC nodes 1 and 2
first_sc_node_best_block = first_sidechain_node.block_best()["result"]
second_sc_node_best_block = second_sidechain_node.block_best()["result"]
assert_equal(first_sc_node_best_block["height"], 1, "The best block has not the specified height.")
assert_equal(second_sc_node_best_block["height"], 1, "The best block has not the specified height.")
sc_1_mc_block_inclusion = is_mainchain_block_included_in_sc_block(first_sc_node_best_block["block"],
first_mainchain_node_block)
sc_2_mc_block_inclusion = is_mainchain_block_included_in_sc_block(second_sc_node_best_block["block"],
first_mainchain_node_block)
assert_true(sc_1_mc_block_inclusion, "The mainchain block is not included for SC node 1.")
assert_true(sc_2_mc_block_inclusion, "The mainchain block is not included for SC node 2.")
first_sc_mc_best_block_ref_info = first_sidechain_node.mainchain_bestBlockReferenceInfo()["result"]
second_sc_mc_best_block_ref_info = second_sidechain_node.mainchain_bestBlockReferenceInfo()["result"]
assert_true(
check_mainchain_block_reference_info(
first_sc_mc_best_block_ref_info, first_mainchain_node_block),
"The mainchain block is not included inside SC block reference info.")
assert_true(
check_mainchain_block_reference_info(
second_sc_mc_best_block_ref_info, first_mainchain_node_block),
"The mainchain block is not included inside SC block reference info.")
check_box_balance(first_sidechain_node, genesis_account, 3, 1, wallet_balance)
check_wallet_balance(first_sidechain_node, wallet_balance)
# MC 1 mine a new block
block_hash = first_mainchain_node.generate(1)[0]
first_mainchain_node_new_block = first_mainchain_node.getblock(block_hash)
# SC node 1 forges 1 SC block
generate_next_blocks(first_sidechain_node, "first node", 1)
# verify the block is included inside SC node 1
first_sc_node_best_block = first_sidechain_node.block_best()["result"]
second_sc_node_best_block = second_sidechain_node.block_best()["result"]
assert_equal(first_sc_node_best_block["height"], 2, "The best block has not the specified height.")
sc_1_mc_block_inclusion = is_mainchain_block_included_in_sc_block(first_sc_node_best_block["block"],
first_mainchain_node_new_block)
assert_true(sc_1_mc_block_inclusion, "The mainchain block is not included for SC node 1.")
# verify the mc block is NOT included inside SC node 2
sc_2_mc_block_inclusion = is_mainchain_block_included_in_sc_block(second_sc_node_best_block["block"],
first_mainchain_node_new_block)
assert_false(sc_2_mc_block_inclusion, "The mainchain block is included for SC node 2.")
first_sc_mc_best_block_ref_info = first_sidechain_node.mainchain_bestBlockReferenceInfo()["result"]
second_sc_mc_best_block_ref_info = second_sidechain_node.mainchain_bestBlockReferenceInfo()["result"]
assert_true(
check_mainchain_block_reference_info(
first_sc_mc_best_block_ref_info, first_mainchain_node_new_block),
"The mainchain block is not included inside SC block reference info.")
assert_false(
check_mainchain_block_reference_info(
second_sc_mc_best_block_ref_info, first_mainchain_node_new_block),
"The mainchain block is not included inside SC block reference info.")
# connect MC 1 to MC 2
connect_nodes_bi(self.nodes, 0, 1)
self.sync_all()
# connect SC 1 to SC 2
connect_sc_nodes(self.sc_nodes[0], 1)
self.sc_sync_all()
# verify the block is included inside SC node 2
second_sc_node_best_block = second_sidechain_node.block_best()["result"]
sc_2_mc_block_inclusion = is_mainchain_block_included_in_sc_block(second_sc_node_best_block["block"], first_mainchain_node_new_block)
assert_true(sc_2_mc_block_inclusion, "The mainchain block is not included for SC node 2.")
if __name__ == "__main__":
MCSCConnectedNodes().main()
| Python | CL | 9e2fffaccd76be944383c4593b0f7e12efbe479982501c2f2a5c92df045bbcda |
import sys
sys.path.append("../")
import numpy as np
from operation import *
class feature():
def __init__(self):
pass
def getKey(self, board, num):
pass
def updateScore(self, board, delta):
pass
def getScore(self, board):
pass
def setSymmetricBoards(self, rotateSymmetry, isomorphic):
"""
:param rotateSymmetry: including (up, down, letf, right) 4 boards
:param isomorphic: including rotateSymmetry board and its mirrorsymmetric board, total 8 boards
"""
self.rotateBoards = rotateSymmetry
self.isomorphicBoards = isomorphic
def getRotateBoards(self):
"""
        :return: the rotated symmetric boards
"""
return self.rotateBoards
# horizontal symmetric
def getMirrorSymmetricBoard(self, board):
"""
:param board: board state
:return: mirror symmetric board
"""
reverseRows = reverseRow(board)
return reverseRows
| Python | CL | 3e75e8a0c79232ea6a539b73d974b87ea493ae4ab171653c47e2ce8120751104 |
import torch
from torch.autograd import Variable
from torchtext import data
from torchtext import datasets
from torchtext.vocab import Vectors, GloVe
import spacy
import joblib
import matplotlib.pyplot as plt
from matplotlib import ticker
BOS_WORD = '<s>'
EOS_WORD = '</s>'
MAX_LEN = 20
MIN_FREQ = 5
def tokenize(text, spacy_model):
    # the parameter is a loaded spaCy model; renamed to avoid shadowing the spacy module
    return [tok.text for tok in spacy_model.tokenizer(text)]
def load_dataset_from_fields(DE, EN):
# Loads German-to-English dataset
train, val, test = datasets.IWSLT.splits(
exts=('.de', '.en'), fields=(DE, EN),
filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and
len(vars(x)['trg']) <= MAX_LEN
)
return train, val, test
def load_dataset(batch_size, use_pretrained_emb=False, save_dir='.save'):
'''
Each batch has dimensions (seq_len, batch_size)
Returns tuple of (train iterator, val iterator, test iterator, TRG, SRC).
'''
print('Loading German-English data...')
fname = './{}/vocabs.jl'.format(save_dir)
try:
data_dict = joblib.load(fname)
if ((MAX_LEN != data_dict['max_len']) or (MIN_FREQ != data_dict['min_freq'])):
raise ValueError()
print('Using cached vocabs...')
DE = data_dict['DE']
EN = data_dict['EN']
train, val, test = load_dataset_from_fields(DE, EN)
    except (OSError, KeyError, ValueError):
# Load tokenizers
spacy_de = spacy.load('de')
spacy_en = spacy.load('en')
DE = data.Field(tokenize=lambda text: tokenize(text, spacy_de))
EN = data.Field(tokenize=lambda text: tokenize(text, spacy_en),
init_token = BOS_WORD, eos_token = EOS_WORD)
train, val, test = load_dataset_from_fields(DE, EN)
# Build vocabulary
DE.build_vocab(train.src, min_freq=MIN_FREQ)
EN.build_vocab(train.trg, min_freq=MIN_FREQ)
# Load pretrained embeddings
if use_pretrained_emb == 'GloVe':
print('Loading GloVe EN embeddings...')
EN.vocab.load_vectors(vectors=GloVe(name='6B'))
elif use_pretrained_emb == 'fastText':
print('Loading fastText EN / DE embeddings...')
en_url = 'https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.simple.vec'
de_url = 'https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki.de.vec'
EN.vocab.load_vectors(vectors=Vectors('wiki.simple.vec', url=en_url))
DE.vocab.load_vectors(vectors=Vectors('wiki.de.vec', url=de_url))
# Save vocab fields
data_dict = {'DE': DE, 'EN': EN, 'max_len': MAX_LEN, 'min_freq': MIN_FREQ}
joblib.dump(data_dict, fname)
# Train-validation split
train_iter, val_iter, test_iter = data.BucketIterator.splits(
(train, val, test), batch_size=batch_size, device=-1,
repeat=False, sort_key=lambda x: len(x.src)
)
print("[TRAIN]:{} (dataset:{})\t[VAL]:{} (dataset:{})\t[TEST]:{} (dataset:{})".format(
len(train_iter), len(train_iter.dataset),
len(val_iter), len(val_iter.dataset),
len(test_iter), len(test_iter.dataset)))
print("[SRC_vocab]:{} (DE)\t[TRG_vocab]:{} (EN)".format(len(DE.vocab), len(EN.vocab)))
return train_iter, val_iter, test_iter, DE, EN
def get_src_and_trgs(batch, use_cuda, is_eval):
'''
Returns tuple of Variables representing
(src, trg_input, trg_targets) for batch.
Each batch has shape (batch_size, seq_len)
'''
src_and_trgs = (batch.src, batch.trg[:-1], batch.trg[1:])
out = tuple(o.data.t().contiguous() for o in src_and_trgs)
if use_cuda:
return tuple(Variable(o.cuda(), volatile=is_eval) for o in out)
else:
return tuple(Variable(o, volatile=is_eval) for o in out)
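# --- Hypothetical usage sketch (illustrative only) ---
# train_iter, val_iter, test_iter, DE, EN = load_dataset(batch_size=32)
# for batch in train_iter:
#     src, trg_input, trg_targets = get_src_and_trgs(batch, use_cuda=False, is_eval=False)
#     break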
def seq_to_text(seq, TEXT):
'''
seq: torch.Tensor
'''
return [TEXT.vocab.itos[idx] for idx in seq]
def sample(num_samples, src, trg, pred, SRC, TRG):
'''
Sample src, trg, and pred sentences.
all inputs are Tensors
'''
for i in range(num_samples):
print('>>>>> SAMPLE {}'.format(i))
print('[SRC] {}'.format(' '.join(seq_to_text(src[i], SRC))))
print('[TRG] {}'.format(' '.join(seq_to_text(trg[i], TRG))))
print('[PRED] {}'.format(' '.join(seq_to_text(pred[i], TRG))))
def visualize_attn(attn, src, pred, trg, fname):
# Set up figure with colorbar
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(attn, cmap='bone')
fig.colorbar(cax)
# Set up axes
pred_and_trgs = ['[{}] {}'.format(t, p) for p, t in zip(pred, trg)]
ax.set_xticklabels([''] + src, rotation=90)
ax.set_yticklabels([''] + pred_and_trgs)
# Show label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
# Save
plt.savefig(fname)
plt.close()
| Python | CL | 8c426838eeba3be90535815b53365a313995be85ee7f237878f46ba5dcea200c |
1, tab completion
2, a? provide information on a
a?? also include definition of function a if possible
*a*? provide names matching the string with wildcards
4, Ctrl+C: interrupt a running program
5, paste: Ctrl+Shift+V / %paste / %cpaste(use #4 to quit this mode)
6, keyboard shortcuts: Ctrl+U/Ctrl+K <-> Ctrl+y
7, magic commands:
%reset -f : force reset
%automagic : switch if % is needed
%magic, %quickref: all magic commands
%hist: Print command input (and optionally output) history
%pdb: Automatically enter debugger after any exception
%paste: Execute pre-formatted Python code from clipboard
%cpaste: Open a special prompt for manually pasting Python code to be executed
%reset: Delete all variables / names defined in interactive namespace
%page: OBJECT Pretty print the object and display it through a pager
%run script.py: Run a Python script inside IPython
%prun statement: Execute statement with cProfile and report the profiler output
%time statement: Report the execution time of single statement
%timeit statement: Run a statement multiple times to compute an ensemble average execution time. Useful for
timing code with very short execution time
%who, %who_ls, %whos: Display variables defined in interactive namespace, with varying levels of information / verbosity
%xdel variable: Delete a variable and attempt to clear any references to the object in the IPython internals
%logstart/%logon/%logoff/%logstop/%logstate
%alias ll ls -l: ll will mean ls -l(detailed ls)
%alias test_alias (cd ch08; ls; cd ..): also good for a seq of commands.
%bookmark db /home/wesm/Dropbox/
%bookmark -l: display all bookmarks
#bookmark persists between ipython sessions
%debug: useful when calling it after an exception is thrown, will start with the error code
%pdb: debug will be invoked when exception is thrown
8, ipython qtconsole --pylab=inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
img=mpimg.imread('stinkbug.png')
plt.imshow(img)
9, matplotlib integration
$ ipython --pylab: a matplotlib plot window will be created automatically; most of the numpy/matplotlib namespace will be imported.
10, output/input variables
_/__ : the last/2nd-last output result
_iX: the input of line X (X is the line number)
_X: the output result of line X
11, Integration with system
!cmd: execute some system cmd
#get IP address
ip_info = !ifconfig eth0 | grep "inet "
ip_info[0].strip()
12, ! and $: substitute variable content defined in current environment
foo = 'test*'
!ls $foo
13, debugger commands
h: display command list
help cmd: show info of cmd
c: continue program execution
q: quit debugger
b no: set a breakpoint at line no in the current file.
b filename:no: set a breakpoint at line no in filename
s: step into function call
n: execute current line and advance to next line
u/d: move up/down in function call stack
a: show argument for current function
debug statement: Invoke the given statement in a new (recursive) debugger
l(ist) statement: Show current position and context at current level of stack
w(here): Print full stack trace with context at current position
#other ways to use debugger
def set_trace():
from IPython.core.debugger import Pdb
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def debug(f, *args, **kwargs):
from IPython.core.debugger import Pdb
pdb = Pdb(color_scheme='Linux')
return pdb.runcall(f, *args, **kwargs)
#put above codes in profile
14, basic profiling
#in command line
python -m cProfile -s cumulative cprof_example.py
#display execution times of functions in cprof_example.py in sorted order
#in ipython
%prun -l 7 -s cumulative run_experiment()
%run -p -s cumulative cprof_example.py
#line-by-line profiling: line_profiler library
%lprun -f func1 -f func2 statement_to_profile
15, ipython html notebook
$ ipython notebook --pylab=inline
#cloud notebook: https://notebookcloud.appspot.com/login
16, module dependency
#modules are loaded only once by default, so if we modified a module imported earlier and want to use the updated version:
reload(some_lib)
17, make a class console-friendly by defining its repr
def __repr__(self):
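#e.g. a minimal illustration (not from the book): echo the constructor call
class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __repr__(self):
        return 'Point(%r, %r)' % (self.x, self.y)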
18, create multiple profile configurations
ipython profile create secret_project
$ ipython --profile=secret_project
| Python | CL | 828e09c10ac6a815b2acba6d1dfe6c64a3e7b6d0dae8fe7e8ea51248aa8b75b5 |
#flake8: noqa
'''
Generate trees for measuring and comparing L1 and UCT efficiencies with
respect to RECO objects.
Usage:
./makeEfficiencyTree_cfg.py
Optional arguments:
inputFiles=myFile.root outputFile=outputFile.root maxEvents=-1
Authors: L. Dodd, N. Woods, I. Ojalvo, S. Dasu, M. Cepeda, E. Friis (UW Madison)
'''
import FWCore.ParameterSet.Config as cms
import os
# Get command line options
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing('analysis')
# Set useful defaults
#options.inputFiles = '/store/user/tapas/ETauSkim/skim_12_1_erV.root'
options.outputFile = "uct_efficiency_tree.root"
options.register(
'eicIsolationThreshold',
3,
VarParsing.multiplicity.singleton,
VarParsing.varType.int,
"EIC Isolation threshold")
options.register(
'hActivityCut',
0.5,
VarParsing.multiplicity.singleton,
VarParsing.varType.float,
"HCAL activity threshold")
options.register(
'ecalCalib',
'CALIB_V4',
VarParsing.multiplicity.singleton,
VarParsing.varType.string,
'Can be CALIB_V1, CALIB_V3, or CALIB_V4')
options.register(
'eicCardHcalOnly',
0,
VarParsing.multiplicity.singleton,
VarParsing.varType.int,
'If 1, turn off the ECAL for the stage1 EGTau path.')
options.register(
'isMC',
0,
VarParsing.multiplicity.singleton,
VarParsing.varType.int,
'Set to 1 for simulated samples - updates GT, emulates HCAL TPGs.')
options.parseArguments()
process = cms.Process("L1UCTEfficiency")
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration/StandardSequences/FrontierConditions_GlobalTag_cff')
process.load('JetMETCorrections.Configuration.DefaultJEC_cff')
if 'CMSSW_6' in os.environ['CMSSW_VERSION']:
process.GlobalTag.globaltag = 'POSTLS162_V2::All'
print "Using global tag for upgrade MC: %s" % process.GlobalTag.globaltag
if not options.isMC:
raise ValueError("There is no data in CMSSW 6, you must mean isMC=1")
else:
if not options.isMC:
# CMSSW 5 data
process.GlobalTag.globaltag = 'GR_R_53_V21::All'
else:
# CMSSW 5 MC
process.GlobalTag.globaltag = 'START53_V7B::All'
process.GlobalTag.connect = 'frontier://FrontierProd/CMS_COND_31X_GLOBALTAG'
process.GlobalTag.pfnPrefix = cms.untracked.string('frontier://FrontierProd/')
print "Using global tag for 52X data: %s" % process.GlobalTag.globaltag
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
process.source = cms.Source(
"PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles)
)
process.TFileService = cms.Service(
"TFileService",
fileName = cms.string(options.outputFile)
)
# Load emulation and RECO sequences
if not options.isMC:
process.load("L1Trigger.UCT2015.emulation_cfi")
else:
process.load("L1Trigger.UCT2015.emulationMC_cfi")
process.load("L1Trigger.UWTriggerTools.recoObjects_cfi")
process.load("Configuration.Geometry.GeometryIdeal_cff")
if 'CMSSW_6' in os.environ['CMSSW_VERSION']:
process.load("L1Trigger.UWTriggerTools.recoObjects_cfi")
else:
process.load("L1Trigger.UWTriggerTools.recoObjects53X_cfi")
# Common branches to add to the ntuple
common_ntuple_branches = cms.PSet(
index = cms.string("index"), # Index of reco object in the event
nRecoObjects = cms.string("nTotalObjects"), # Number of reco objects in the event
nPVs = cms.string("nPVs"), # number of reco'ed vertices in the event
# Run, lumi, event number
run = cms.string("id.run"),
lumi = cms.string("id.luminosityBlock"),
evt = cms.string("id.event"),
recoPt = cms.string("reco.pt"),
recoEta = cms.string("reco.eta"),
recoPhi = cms.string("reco.phi"),
# Whether there exists a L1/UCT object matched to reco
l1Match = cms.string("l1Match"),
l1gMatch = cms.string("l1gMatch"),
l1Pt = cms.string("? l1Match ? l1.pt : 0"),
l1Eta = cms.string("? l1Match ? l1.eta : 0"),
l1Phi = cms.string("? l1Match ? l1.phi : 0"),
l1Type = cms.string("? l1Match ? l1.type() : -1"),
# TODO add L1extra eta/phi indices
l1DPhi = cms.string("? l1Match ? deltaPhi(l1.phi, reco.phi) : -1"),
l1DR = cms.string("? l1Match ? deltaR(l1.eta, l1.phi, reco.eta, reco.phi) : -1"),
l1gPt = cms.string("? l1gMatch ? l1g.pt : 0"),
l1gEta = cms.string("? l1gMatch ? l1g.eta : 0"),
l1gPhi = cms.string("? l1gMatch ? l1g.phi : 0"),
# For tuning isolation and PU subtraction
l1gPUM0 = cms.string("? l1gMatch ? l1g.getFloat('puLevelPUM0', -4) : -2"),
l1gPU = cms.string("? l1gMatch ? l1g.getFloat('puLevel', -4) : -2"),
l1gPUUIC = cms.string("? l1gMatch ? l1g.getFloat('puLevelUIC', -4) : -2"),
l1gRegionEt = cms.string("? l1gMatch ? l1g.getFloat('associatedRegionEt', -4) : -2"),
l1gEtaCode = cms.vstring("? l1gMatch ? l1g.getInt('rgnEta') : 0", "I"),
l1gPhiCode = cms.vstring("? l1gMatch ? l1g.getInt('rgnPhi') : 0", "I"),
l1gDPhi = cms.string("? l1gMatch ? deltaPhi(l1g.phi, reco.phi) : -1"),
l1gDEta = cms.string("? l1gMatch ? l1g.eta - reco.eta : -10"),
l1gDR = cms.string("? l1gMatch ? deltaR(l1g.eta, l1g.phi, reco.eta, reco.phi) : -1"),
)
jet_branches = cms.PSet(
l1gNWRegion = cms.string("? l1gMatch ? l1g.getFloat('neighborNW_et', -4) : -2"),
l1gNERegion = cms.string("? l1gMatch ? l1g.getFloat('neighborNE_et', -4) : -2"),
l1gSERegion = cms.string("? l1gMatch ? l1g.getFloat('neighborSE_et', -4) : -2"),
l1gSWRegion = cms.string("? l1gMatch ? l1g.getFloat('neighborSW_et', -4) : -2"),
l1gWRegion = cms.string("? l1gMatch ? l1g.getFloat('neighborW_et', -4) : -2"),
l1gERegion = cms.string("? l1gMatch ? l1g.getFloat('neighborE_et', -4) : -2"),
l1gNRegion = cms.string("? l1gMatch ? l1g.getFloat('neighborN_et', -4) : -2"),
l1gSRegion = cms.string("? l1gMatch ? l1g.getFloat('neighborS_et', -4) : -2"),
l1gJetSeed = cms.string("? l1gMatch ? l1g.getFloat('jetseed_et', -4) : -2"),
)
# Specific to EG tau objects
egtau_branches = cms.PSet(
l1gSecondRegionEt = cms.string("? l1gMatch ? l1g.getFloat('associatedSecondRegionEt', -4) : -2"),
l1gJetPt = cms.string("? l1gMatch ? l1g.getFloat('associatedJetPt', -4) : -2"),
l1gEllIso = cms.string("? l1gMatch ? l1g.getInt('ellIsolation', -4) : -2"),
l1gTauVeto = cms.string("? l1gMatch ? l1g.getInt('tauVeto', -4) : -2"),
l1gMIP = cms.string("? l1gMatch ? l1g.getInt('mipBit', -4) : -2"),
l1gIsEle = cms.string("? l1gMatch ? l1g.getInt('isEle', -4) : -2"),
)
# Keep track of electron isolation values
electron_branches = cms.PSet(
dr03TkSumPt = cms.string("reco.dr03TkSumPt"),
dr03EcalRecHitSumEt = cms.string("reco.dr03EcalRecHitSumEt"),
dr03HcalTowerSumEt = cms.string("reco.dr03HcalTowerSumEt"),
dr03CombinedEt = cms.string("reco.dr03TkSumPt + reco.dr03EcalRecHitSumEt + reco.dr03HcalTowerSumEt"),
)
# Keep track of information about the ECAL/HCAL composition of taus
tau_branches = cms.PSet(
emFraction = cms.string("reco.emFraction"),
decayMode = cms.string("reco.decayMode"),
# recoEcal = cms.string("ecalEnergy"),
# recoHcal = cms.string("hcalEnergy"),
# EK - as far as I can tell, this does not use the lead track at all
# hcal = cms.string("reco.hcalTotOverPLead"),
)
process.isoTauEfficiency = cms.EDAnalyzer(
"EfficiencyTree",
recoSrc = cms.VInputTag("isoTaus"),
l1Src = cms.VInputTag(cms.InputTag("l1extraParticles", "Tau")),
l1GSrc = cms.VInputTag(cms.InputTag("UCT2015Producer", "IsolatedTauUnpacked")),
l1GPUSrc = cms.InputTag("UCT2015Producer", "PULevelPUM0Unpacked"),
# Max DR for RECO-trigger matching
maxDR = cms.double(0.5),
# Ntuple configuration
ntuple = cms.PSet(
common_ntuple_branches,
egtau_branches,
tau_branches,
)
)
# Define the tree producers
process.rlxTauEfficiency = cms.EDAnalyzer(
"EfficiencyTree",
recoSrc = cms.VInputTag("recoTaus"),
l1Src = cms.VInputTag(cms.InputTag("l1extraParticles", "Tau")),
l1GSrc = cms.VInputTag(cms.InputTag("UCT2015Producer", "RelaxedTauUnpacked")),
l1GPUSrc = cms.InputTag("UCT2015Producer", "PULevelPUM0Unpacked"),
# Max DR for RECO-trigger matching
maxDR = cms.double(0.5),
# Ntuple configuration
ntuple = cms.PSet(
common_ntuple_branches,
egtau_branches,
tau_branches,
)
)
# Define the tree producers
process.rlxTauPlusJetEfficiency = cms.EDAnalyzer(
"EfficiencyTree",
recoSrc = cms.VInputTag("recoTaus"),
l1Src = cms.VInputTag(
cms.InputTag("l1extraParticles", "Tau"),
cms.InputTag("l1extraParticles", "Central"),
),
l1GSrc = cms.VInputTag(cms.InputTag("UCT2015Producer", "RelaxedTauUnpacked")),
l1GPUSrc = cms.InputTag("UCT2015Producer", "PULevelPUM0Unpacked"),
# Max DR for RECO-trigger matching
maxDR = cms.double(0.5),
# Ntuple configuration
ntuple = cms.PSet(
common_ntuple_branches,
egtau_branches,
tau_branches,
)
)
# Note that the input electron collection is not isolated, this needs to be done
# at the ntuple level.
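# (sketch) offline one might cut on the combined isolation stored in the
# branches below, e.g. dr03CombinedEt/recoPt < 0.15 -- threshold illustrative.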
process.isoEGEfficiency = cms.EDAnalyzer(
"EfficiencyTree",
recoSrc = cms.VInputTag("recoElecs"),
l1Src = cms.VInputTag(cms.InputTag("l1extraParticles", "Isolated")),
l1GSrc = cms.VInputTag(cms.InputTag("UCT2015Producer", "IsolatedEGUnpacked")),
l1GPUSrc = cms.InputTag("UCT2015Producer", "PULevelPUM0Unpacked"),
# Max DR for RECO-trigger matching
maxDR = cms.double(0.5),
# Ntuple configuration
ntuple = cms.PSet(
common_ntuple_branches,
egtau_branches,
electron_branches,
)
)
process.rlxEGEfficiency = cms.EDAnalyzer(
"EfficiencyTree",
recoSrc = cms.VInputTag("recoElecs"),
l1Src = cms.VInputTag(
        # Combine the non-isolated and isolated L1 EG collections
cms.InputTag("l1extraParticles", "NonIsolated"),
cms.InputTag("l1extraParticles", "Isolated"),
),
l1GSrc = cms.VInputTag(cms.InputTag("UCT2015Producer", "RelaxedEGUnpacked")),
l1GPUSrc = cms.InputTag("UCT2015Producer", "PULevelPUM0Unpacked"),
# Max DR for RECO-trigger matching
maxDR = cms.double(0.5),
# Ntuple configuration
ntuple = cms.PSet(
common_ntuple_branches,
egtau_branches,
electron_branches,
)
)
# So we can compare relaxed UCT + ntuple isolation cuts versus stock L1 IsoEG
process.rlxUCTisoL1EGEfficiency = cms.EDAnalyzer(
"EfficiencyTree",
recoSrc = cms.VInputTag("recoElecs"),
l1Src = cms.VInputTag(
cms.InputTag("l1extraParticles", "Isolated"),
),
l1GSrc = cms.VInputTag(cms.InputTag("UCT2015Producer", "RelaxedEGUnpacked")),
l1GPUSrc = cms.InputTag("UCT2015Producer", "PULevelPUM0Unpacked"),
# Max DR for RECO-trigger matching
maxDR = cms.double(0.5),
# Ntuple configuration
ntuple = cms.PSet(
common_ntuple_branches,
egtau_branches,
electron_branches,
)
)
# Package all of the lepton efficiencies into one sequence
process.leptonEfficiencies = cms.Sequence(
    process.isoTauEfficiency *
    process.rlxTauEfficiency *
    process.rlxTauPlusJetEfficiency *
    process.isoEGEfficiency *
    process.rlxEGEfficiency *
    process.rlxUCTisoL1EGEfficiency
)
process.jetEfficiency = cms.EDAnalyzer(
"EfficiencyTree",
recoSrc = cms.VInputTag("recoJets"),
l1Src = cms.VInputTag(
# Combine central jets + tau + forward jets
cms.InputTag("l1extraParticles", "Central"),
cms.InputTag("l1extraParticles", "Tau"),
cms.InputTag("l1extraParticles", "Forward"),
),
l1GSrc = cms.VInputTag(cms.InputTag("UCT2015Producer", "JetUnpacked")),
l1GPUSrc = cms.InputTag("UCT2015Producer", "PULevelPUM0Unpacked"),
# Max DR for RECO-trigger matching
maxDR = cms.double(0.5),
# Ntuple configuration
ntuple = cms.PSet(
common_ntuple_branches, jet_branches
)
)
process.corrjetEfficiency = cms.EDAnalyzer(
"EfficiencyTree",
recoSrc = cms.VInputTag("recoJets"),
l1Src = cms.VInputTag(
# Combine central jets + tau + forward jets
cms.InputTag("l1extraParticles", "Central"),
cms.InputTag("l1extraParticles", "Tau"),
cms.InputTag("l1extraParticles", "Forward"),
),
l1GSrc = cms.VInputTag(cms.InputTag("UCT2015Producer", "CorrJetUnpacked")),
l1GPUSrc = cms.InputTag("UCT2015Producer", "PULevelPUM0Unpacked"),
# Max DR for RECO-trigger matching
maxDR = cms.double(0.5),
# Ntuple configuration
ntuple = cms.PSet(
common_ntuple_branches,
)
)
process.highPtPF = cms.EDFilter(
"GenericPFCandidateSelector",
src = cms.InputTag("particleFlow"),
cut = cms.string("charge != 0 && pt > 10")
)
process.printPF = cms.EDAnalyzer(
"CandInfoPrinter",
src = cms.InputTag("highPtPF"),
printHeader=cms.bool(True),
pt = cms.string("pt"),
eta = cms.string("eta"),
pdgId = cms.string("pdgId")
)
process.printTaus = cms.EDAnalyzer(
"CandInfoPrinter",
src = cms.InputTag("isoTaus"),
printHeader=cms.bool(True),
pt = cms.string("pt"),
ncands = cms.string("signalPFCands.size"),
eta = cms.string("eta"),
phi = cms.string("phi"),
dm = cms.string("decayMode"),
)
process.printTaus2 = cms.EDAnalyzer(
"CandInfoPrinter",
src = cms.InputTag("recoTaus"),
printHeader=cms.bool(True),
pt = cms.string("pt"),
ncands = cms.string("signalPFCands.size"),
eta = cms.string("eta"),
phi = cms.string("phi"),
dm = cms.string("decayMode"),
)
process.printTPGs = cms.EDFilter(
"TPGDebugger",
ecalSrc = cms.InputTag("ecalDigis:EcalTriggerPrimitives"),
#hcalSrc = cms.InputTag("hackHCALMIPs"),
hcalSrc = cms.InputTag("simHcalTriggerPrimitiveDigis"),
toPrint = cms.VPSet(
# everything in all events
cms.PSet(
run = cms.uint32(0),
minIEta = cms.int32(-1000),
maxIEta = cms.int32(1000),
maxIPhi = cms.int32(1000),
minIPhi = cms.int32(-1000),
)
)
)
reco_object_step = process.recoObjects
# Determine if we want truth matching or not.
if options.isMC:
reco_object_step = process.recoObjects_truthMatched
process.rlxTauPlusJetEfficiency.recoSrc = cms.VInputTag("trueTaus")
process.isoTauEfficiency.recoSrc = cms.VInputTag("trueTaus")
process.rlxTauEfficiency.recoSrc = cms.VInputTag("trueTaus")
process.printTaus.src=cms.InputTag("trueTaus")
process.p1 = cms.Path(
reco_object_step *
process.emulationSequence
# *process.printTaus
#process.highPtPF *
#process.printPF *
#process.printTPGs *
#process.dump *
#process.pionEfficiency *
)
print "Building Stage1 trees"
process.p1 += process.leptonEfficiencies
################################################################################
### Semileptonic ttbar skim for sums ###########################################
################################################################################
process.oneMuon = cms.EDFilter(
"CandViewCountFilter",
src = cms.InputTag("tightMuons"),
minNumber = cms.uint32(1),
)
process.jetsPt30 = cms.EDFilter(
"PFJetSelector",
src = cms.InputTag("ak5PFJetsNOMuons"),
filter = cms.bool(True),
cut = cms.string("pt > 30")
)
process.atLeastThreeJets = cms.EDFilter(
"CandViewCountFilter",
src = cms.InputTag("jetsPt30"),
minNumber = cms.uint32(3),
)
# Computing CaloMET significance
process.load("RecoMET.METProducers.CaloMETSignif_cfi")
process.metsignificance.noHF = True
# Computing RECO level Sum ET and Sum HT
process.load("L1Trigger.UWTriggerTools.PFSumET_cfi")
process.l1SumsEfficiency = cms.EDAnalyzer(
"SumsEfficiencyTree",
tree2015 =cms.bool(False),
l1MHTSrc = cms.InputTag("l1extraParticles", "MHT"),
l1METSrc = cms.InputTag("l1extraParticles", "MET"),
# Evan said change l1METSigSrc to match recoMETSigSrc
l1METSigSrc = cms.InputTag("UCT2015Producer", "METSIGUnpacked"),
#l1METSigSrc = cms.InputTag("metsignificance"),
# fixme
l1SHTSrc = cms.InputTag("l1extraParticles", "MHT"),
l1SETSrc = cms.InputTag("l1extraParticles", "MET"),
recoMHTSrc = cms.InputTag("pfSumET", "mht"),
recoMETSrc = cms.InputTag("metNoHF"), # calomet
#recoMETSigSrc = cms.InputTag("metsignificance"), # does not work in 62X - why?
recoMETSigSrc = cms.InputTag("metNoHF"),
recoSHTSrc = cms.InputTag("pfSumET", "sht"),
recoSETSrc = cms.InputTag("pfSumET", "set"),
recoPFMETSrc = cms.InputTag("pfMet"), # pfmet
recoFile = cms.bool(True)
)
process.uctSumsEfficiency = cms.EDAnalyzer(
"SumsEfficiencyTree",
tree2015 =cms.bool(True),
l1MHTSrc = cms.InputTag("UCT2015Producer", "MHTUnpacked"),
l1METSrc = cms.InputTag("UCT2015Producer", "METUnpacked"),
l1METSigSrc = cms.InputTag("UCT2015Producer", "METSIGUnpacked"),
l1SHTSrc = cms.InputTag("UCT2015Producer", "SHTUnpacked"),
l1SETSrc = cms.InputTag("UCT2015Producer", "SETUnpacked"),
recoMETSrc = cms.InputTag("metNoHF"), # calomet
recoPFMETSrc = cms.InputTag("pfMet"), # pfmet
recoMHTSrc = cms.InputTag("pfSumET", "mht"), # calomet
# recoMETSigSrc = cms.InputTag("metsignificance"), # # does not work in 62X - why?
recoMETSigSrc = cms.InputTag("metNoHF"),
recoSHTSrc = cms.InputTag("pfSumET", "sht"),
recoSETSrc = cms.InputTag("pfSumET", "set"),
recoFile = cms.bool(True)
)
# Make a version of UCT without PU corrections.
process.UCT2015ProducerNoPU = process.UCT2015Producer.clone(
puMultCorrect = False
)
process.uctSumsNoPUEfficiency = process.uctSumsEfficiency.clone(
l1MHTSrc = cms.InputTag("UCT2015ProducerNoPU", "MHTUnpacked"),
l1METSrc = cms.InputTag("UCT2015ProducerNoPU", "METUnpacked"),
l1METSigSrc = cms.InputTag("UCT2015ProducerNoPU", "METSIGUnpacked"),
l1SHTSrc = cms.InputTag("UCT2015ProducerNoPU", "SHTUnpacked"),
l1SETSrc = cms.InputTag("UCT2015ProducerNoPU", "SETUnpacked"),
)
process.semileptonicTTBarPath = cms.Path(
process.cleanJets *
process.jetEfficiency *
process.corrjetEfficiency*
process.oneMuon *
process.jetsPt30 *
process.atLeastThreeJets *
process.pfSumET *
#process.metsignificance *
process.l1SumsEfficiency *
process.uctSumsEfficiency
# w/o PU corrections
#process.UCT2015ProducerNoPU *
#process.uctSumsNoPUEfficiency
)
# Output definition
process.output = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('out.root'),
outputCommands = cms.untracked.vstring(
'drop *',
'keep *_*_*_L1UCTEfficiency',
)
)
process.out = cms.EndPath(process.output)
process.schedule = cms.Schedule(
process.p1,
process.semileptonicTTBarPath
# process.out
)
# Make the framework shut up.
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cerr.FwkReport.reportEvery = 1000
# Spit out filter efficiency at the end.
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(False))
#eic = options.eicIsolationThreshold
#print "Setting EIC threshold to %i" % eic
#process.RCTConfigProducers.eicIsolationThreshold = eic
#hActivity = options.hActivityCut
#print "Setting hActivity threshold to %f" % hActivity
#process.RCTConfigProducers.hActivityCut = hActivity
|
Python
|
CL
|
0cb9960cfa563dfcd96375520f64e8c850fc9a4bfed72123bf8c9888d952f3dd
|
#=============================================
#utf-8 2020-03-10 16:16:19
#Finding the optimal parameters by minimizing AIC
import warnings
import itertools
import pandas as pd
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
from pandas import read_excel
plt.style.use('fivethirtyeight')
series = read_excel('forecasting.xls', index_col=0, parse_dates=True, squeeze=True)
df = series['K54D']
# Define the p, d and q parameters to take the values 0 and 1
p = d = q = range(0, 2)
# Generate all different combinations of p, d and q triplets
pdq = list(itertools.product(p, d, q))
# Generate all different combinations of seasonal p, d and q triplets
seasonal_pdq = [(x[0], x[1], x[2], 12) for x in list(itertools.product(p, d, q))]
warnings.filterwarnings("ignore") # specify to ignore warning messages
for param in pdq:
for param_seasonal in seasonal_pdq:
try:
mod = sm.tsa.statespace.SARIMAX(df,
order=param,
seasonal_order=param_seasonal,
enforce_stationarity=False,
enforce_invertibility=False)
results = mod.fit()
print('ARIMA{}x{}12 - AIC:{}'.format(param, param_seasonal, results.aic))
        except Exception:
            continue
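# A minimal sketch for picking the lowest-AIC configuration automatically
# (reuses df, pdq and seasonal_pdq from above; it mirrors the loop logic):
best_aic, best_order, best_seasonal = float('inf'), None, None
for param in pdq:
    for param_seasonal in seasonal_pdq:
        try:
            res = sm.tsa.statespace.SARIMAX(df, order=param,
                                            seasonal_order=param_seasonal,
                                            enforce_stationarity=False,
                                            enforce_invertibility=False).fit()
            if res.aic < best_aic:
                best_aic, best_order, best_seasonal = res.aic, param, param_seasonal
        except Exception:
            continue
print('Best: ARIMA{}x{}12 - AIC:{}'.format(best_order, best_seasonal, best_aic))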
#=============================================
#ARIMA
from pandas import read_excel
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
plt.style.use('fivethirtyeight')
###############################
series = read_excel('forecasting.xls', index_col=0, parse_dates=True, squeeze=True)
df = series['K54D']
# ARIMA model with (p, d, q)=(1, 1, 1)
mod = sm.tsa.statespace.SARIMAX(df, order=(1,1,1),
seasonal_order=(0,1,1,12))
results = mod.fit(disp=False)
print(results.summary())
# graphical statistics of model (correlogram = ACF plot)
results.plot_diagnostics(figsize=(15, 12))
plt.show()
#============================================
# the fitted one-step-ahead forecasts (for accuracy evaluation) start 01 Jan 2019
pred = results.get_prediction(start=pd.to_datetime('2019-01-01'), dynamic=False)
pred_ci = pred.conf_int()
# plot the original data from 2000 onwards for context
ax = df['2000':].plot(label='Original data')
pred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7)
ax.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.2)
plt.legend()
plt.show()
#=============================================
# MSE evaluation
y_forecasted = pred.predicted_mean
y_truth = df['2019-01-01':]  # align the truth series with the forecast start
# Compute the mean square error
mse = ((y_forecasted - y_truth) ** 2).mean()
print('MSE for ARIMA is', mse)
#============================================
#Dynamic Forecasting
pred = results.get_prediction(start=pd.to_datetime('2005-01-01'), dynamic=True, full_results=True)
pred_ci = pred.conf_int()
# plot the original data from 2000 onwards for context
ax = df['2000':].plot(label='Original data')
pred.predicted_mean.plot(ax=ax, label='Dynamic Forecast')
ax.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.2)
ax.set_xlabel('Date')
ax.set_ylabel('K54D')
plt.legend()
plt.show()
#=============================================
# MSE evaluation
y_forecasted = pred.predicted_mean
y_truth = df['2005-01-01':]
# Compute the mean square error
mse = ((y_forecasted - y_truth) ** 2).mean()
print('MSE for Dynamic Forecasting is', mse)
#=============================================
# get forecast one year ahead in future
pred_uc = results.get_forecast(steps=12)
# Get confidence intervals of forecasts
pred_ci = pred_uc.conf_int()
print(pred_uc.predicted_mean)
# plotting forecasts ahead
ax = df.plot(label='Original data')
pred_uc.predicted_mean.plot(ax=ax, label='Forecast values', title='Forecast plot with confidence interval')
ax.fill_between(pred_ci.index,
pred_ci.iloc[:, 0],
pred_ci.iloc[:, 1], color='k', alpha=.25)
plt.legend()
plt.show()
#=============================================
|
Python
|
CL
|
632ff643083a98370dee56cb10acedb2bd06b2992d074991684afce1698a8a4f
|
import os.path as osp
from utils import (
is_,
arg,
parse_args,
mkdir_p,
time_stamp,
init_logging,
get_logger,
write_lines,
read_lines,
)
# TODO only do parts of pipeline
opts = parse_args(
arg('-name', default='fr-clean'),
arg('-f', '--file', default='train/french_clean'),
    # indices are 0-based: to start from the 18th word, pass 17
arg('-i', '--start', type=int, default=76),
arg('-j', '--stop', type=int),
arg('-q', '--query', nargs='*'),
arg('-s', '--src', default='fr'),
arg('-t', '--target', default='en'),
arg('-n', '--n-img', type=int, default=20),
# pipeline
arg('-is', '--sch', action='store_true'),
arg('-rs', '--rsch', action='store_true'),
arg('-pred', action='store_true'),
# use saved
arg('-load-urls'),
arg('-load-preds'),
)
name = opts.name + '__' if is_(opts.name) else ''
RESULT_PREFIX = osp.join('reverse-img-final-preds',
'%s_to_%s' % (opts.src, opts.target),
name + time_stamp())
mkdir_p(RESULT_PREFIX)
fh = init_logging(file=osp.join(RESULT_PREFIX, 'log.log'),
stdout=True)
LOGGER = get_logger(__name__, main=True)
from nlp_utils import get_words
from image_search import image_search
from reverse_image_search import reverse_search_urls
queries = []
if is_(opts.file):
queries.extend(get_words(opts.file, i=opts.start, j=opts.stop))
if is_(opts.query):
queries.extend(opts.query)
for i, q in enumerate(queries):
LOGGER.info('+++ QUERY #%s: %s +++\n' % (i, q))
RESULT_DIR = osp.join(RESULT_PREFIX, q)
mkdir_p(RESULT_DIR)
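    # defaults so the later steps don't raise a NameError when neither a
    # load flag nor the corresponding search flag is given
    urls, preds = [], []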
if opts.load_urls:
urls = read_lines(osp.join(opts.load_urls, q, 'urls.txt'))
elif opts.sch:
urls = image_search(q, opts.target)
write_lines(urls, osp.join(RESULT_DIR, 'urls.txt'))
if opts.load_preds:
preds = read_lines(osp.join(opts.load_preds, q, 'preds.txt'))
elif opts.rsch:
preds = reverse_search_urls(q, *urls, lang=opts.target,
n_img=opts.n_img)
write_lines(preds, osp.join(RESULT_DIR, 'preds.txt'))
# TODO
# if opts.pred:
# for top_n in 1, 3, 5, 10, 20, 25:
# for use_lang in True, False:
# pred_filtered = filter_results(preds, q, lang=opts.lang)
# for p in pred_filtered:
# print(p)
fh.close()
|
Python
|
CL
|
f94e98d88cd19f8aa78adee02f423fbc44cf7ca7584e1833de2e4fcf5bb70b76
|
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Examples for ResourceControllerV2
"""
import json
import os
import time
import pytest
from ibm_cloud_sdk_core import ApiException, read_external_sources
from ibm_platform_services.resource_controller_v2 import *
#
# This file provides an example of how to use the Resource Controller service.
#
# The following configuration properties are assumed to be defined:
#
# RESOURCE_CONTROLLER_URL=<service url>
# RESOURCE_CONTROLLER_AUTH_TYPE=iam
# RESOURCE_CONTROLLER_AUTH_URL=<IAM Token Service url>
# RESOURCE_CONTROLLER_APIKEY=<User's IAM API Key>
# RESOURCE_CONTROLLER_RESOURCE_GROUP=<Short ID of the user's resource group>
# RESOURCE_CONTROLLER_PLAN_ID=<Unique ID of the plan associated with the offering>
# RESOURCE_CONTROLLER_ACCOUNT_ID=<User's account ID>
# RESOURCE_CONTROLLER_ALIAS_TARGET_CRN=<The CRN of target name(space) in a specific environment>
# RESOURCE_CONTROLLER_BINDING_TARGET_CRN=<The CRN of application to bind to in a specific environment>
#
# These configuration properties can be exported as environment variables, or stored
# in a configuration file and then:
# export IBM_CREDENTIALS_FILE=<name of configuration file>
#
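# For illustration only, a hypothetical resource_controller.env could look
# like this (all values are placeholders, not real credentials):
#
#   RESOURCE_CONTROLLER_URL=https://resource-controller.cloud.ibm.com
#   RESOURCE_CONTROLLER_AUTH_TYPE=iam
#   RESOURCE_CONTROLLER_AUTH_URL=https://iam.cloud.ibm.com
#   RESOURCE_CONTROLLER_APIKEY=<your IAM API key>
#   RESOURCE_CONTROLLER_RESOURCE_GROUP=<resource group short ID>
#   RESOURCE_CONTROLLER_PLAN_ID=<plan GUID>
#   RESOURCE_CONTROLLER_ACCOUNT_ID=<account ID>
#   RESOURCE_CONTROLLER_ALIAS_TARGET_CRN=<target CRN>
#   RESOURCE_CONTROLLER_BINDING_TARGET_CRN=<application CRN>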
config_file = 'resource_controller.env'
resource_controller_service = None
config = None
instanceGuid = None
aliasGuid = None
bindingGuid = None
instanceKeyGuid = None
resourceGroup = None
resourcePlanId = None
accountId = None
aliasTargetCRN = None
bindingTargetCRN = None
reclamationId = None
resourceInstanceName = 'RcSdkInstance1Python'
resourceInstanceUpdateName = 'RcSdkInstanceUpdate1Python'
aliasName = 'RcSdkAlias1Python'
aliasUpdateName = 'RcSdkAliasUpdate1Python'
bindingName = 'RcSdkBinding1Python'
bindingUpdateName = 'RcSdkBindingUpdate1Python'
keyName = 'RcSdkKey1Python'
keyUpdateName = 'RcSdkKeyUpdate1Python'
targetRegion = 'global'
reclaimAction = 'reclaim'
##############################################################################
# Start of Examples for Service: ResourceControllerV2
##############################################################################
# region
class TestResourceControllerV2Examples():
"""
Example Test Class for ResourceControllerV2
"""
@classmethod
def setup_class(cls):
global resource_controller_service
if os.path.exists(config_file):
os.environ['IBM_CREDENTIALS_FILE'] = config_file
# begin-common
resource_controller_service = ResourceControllerV2.new_instance()
# end-common
assert resource_controller_service is not None
# Load the configuration
global config
config = read_external_sources(
ResourceControllerV2.DEFAULT_SERVICE_NAME)
global resourceGroup
resourceGroup = config['RESOURCE_GROUP']
global resourcePlanId
resourcePlanId = config['RECLAMATION_PLAN_ID']
global accountId
accountId = config['ACCOUNT_ID']
global aliasTargetCRN
aliasTargetCRN = config['ALIAS_TARGET_CRN']
global bindingTargetCRN
bindingTargetCRN = config['BINDING_TARGET_CRN']
print('Setup complete.')
needscredentials = pytest.mark.skipif(
not os.path.exists(config_file), reason="External configuration not available, skipping..."
)
@needscredentials
def test_create_resource_instance_example(self):
"""
create_resource_instance request example
"""
try:
global instanceGuid, resourceInstanceName, targetRegion, resourceGroup, resourcePlanId
# begin-create_resource_instance
resource_instance = resource_controller_service.create_resource_instance(
name=resourceInstanceName,
target=targetRegion,
resource_group=resourceGroup,
resource_plan_id=resourcePlanId
).get_result()
print(json.dumps(resource_instance, indent=2))
instanceGuid = resource_instance.get('guid')
# end-create_resource_instance
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_resource_instance_example(self):
"""
get_resource_instance request example
"""
try:
global instanceGuid
# begin-get_resource_instance
resource_instance = resource_controller_service.get_resource_instance(
id=instanceGuid
).get_result()
print(json.dumps(resource_instance, indent=2))
# end-get_resource_instance
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_update_resource_instance_example(self):
"""
update_resource_instance request example
"""
try:
global instanceGuid, resourceInstanceUpdateName
# begin-update_resource_instance
params = {}
params['example'] = 'property'
resource_instance = resource_controller_service.update_resource_instance(
id=instanceGuid,
name=resourceInstanceUpdateName,
parameters=params
).get_result()
print(json.dumps(resource_instance, indent=2))
# end-update_resource_instance
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_list_resource_instances_example(self):
"""
list_resource_instances request example
"""
try:
global resourceInstanceName
# begin-list_resource_instances
resource_instances_list = resource_controller_service.list_resource_instances(
name=resourceInstanceName
).get_result()
print(json.dumps(resource_instances_list, indent=2))
# end-list_resource_instances
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_create_resource_alias_example(self):
"""
create_resource_alias request example
"""
try:
global instanceGuid, aliasName, aliasGuid, aliasTargetCRN
# begin-create_resource_alias
resource_alias = resource_controller_service.create_resource_alias(
name=aliasName,
source=instanceGuid,
target=aliasTargetCRN
).get_result()
aliasGuid = resource_alias.get('guid')
print(json.dumps(resource_alias, indent=2))
# end-create_resource_alias
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_resource_alias_example(self):
"""
get_resource_alias request example
"""
try:
global aliasGuid
# begin-get_resource_alias
resource_alias = resource_controller_service.get_resource_alias(
id=aliasGuid
).get_result()
print(json.dumps(resource_alias, indent=2))
# end-get_resource_alias
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_update_resource_alias_example(self):
"""
update_resource_alias request example
"""
try:
global aliasGuid, aliasUpdateName
# begin-update_resource_alias
resource_alias = resource_controller_service.update_resource_alias(
id=aliasGuid,
name=aliasUpdateName
).get_result()
print(json.dumps(resource_alias, indent=2))
# end-update_resource_alias
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_list_resource_aliases_example(self):
"""
list_resource_aliases request example
"""
try:
# begin-list_resource_aliases
resource_aliases_list = resource_controller_service.list_resource_aliases(
name=aliasName
).get_result()
print(json.dumps(resource_aliases_list, indent=2))
# end-list_resource_aliases
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_create_resource_binding_example(self):
"""
create_resource_binding request example
"""
try:
global aliasGuid, bindingGuid, bindingName, bindingTargetCRN
# begin-create_resource_binding
resource_binding = resource_controller_service.create_resource_binding(
source=aliasGuid,
target=bindingTargetCRN,
name=bindingName
).get_result()
bindingGuid = resource_binding.get('guid')
print(json.dumps(resource_binding, indent=2))
# end-create_resource_binding
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_resource_binding_example(self):
"""
get_resource_binding request example
"""
try:
global bindingGuid
# begin-get_resource_binding
resource_binding = resource_controller_service.get_resource_binding(
id=bindingGuid
).get_result()
print(json.dumps(resource_binding, indent=2))
# end-get_resource_binding
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_update_resource_binding_example(self):
"""
update_resource_binding request example
"""
try:
global bindingGuid, bindingUpdateName
# begin-update_resource_binding
resource_binding = resource_controller_service.update_resource_binding(
id=bindingGuid,
name=bindingUpdateName
).get_result()
print(json.dumps(resource_binding, indent=2))
# end-update_resource_binding
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_list_resource_bindings_example(self):
"""
list_resource_bindings request example
"""
try:
# begin-list_resource_bindings
resource_bindings_list = resource_controller_service.list_resource_bindings(
name=bindingName
).get_result()
print(json.dumps(resource_bindings_list, indent=2))
# end-list_resource_bindings
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_create_resource_key_example(self):
"""
create_resource_key request example
"""
try:
global instanceGuid, instanceKeyGuid, keyName
# begin-create_resource_key
resource_key = resource_controller_service.create_resource_key(
name=keyName,
source=instanceGuid
).get_result()
instanceKeyGuid = resource_key.get('guid')
print(json.dumps(resource_key, indent=2))
# end-create_resource_key
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_get_resource_key_example(self):
"""
get_resource_key request example
"""
try:
global instanceKeyGuid
# begin-get_resource_key
resource_key = resource_controller_service.get_resource_key(
id=instanceKeyGuid
).get_result()
print(json.dumps(resource_key, indent=2))
# end-get_resource_key
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_update_resource_key_example(self):
"""
update_resource_key request example
"""
try:
global instanceKeyGuid, keyUpdateName
# begin-update_resource_key
resource_key = resource_controller_service.update_resource_key(
id=instanceKeyGuid,
name=keyUpdateName
).get_result()
print(json.dumps(resource_key, indent=2))
# end-update_resource_key
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_list_resource_keys_example(self):
"""
list_resource_keys request example
"""
try:
# begin-list_resource_keys
resource_keys_list = resource_controller_service.list_resource_keys(
name=keyName
).get_result()
print(json.dumps(resource_keys_list, indent=2))
# end-list_resource_keys
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_delete_resource_binding_example(self):
"""
delete_resource_binding request example
"""
try:
global bindingGuid
# begin-delete_resource_binding
response = resource_controller_service.delete_resource_binding(
id=bindingGuid
).get_result()
print(json.dumps(response, indent=2))
# end-delete_resource_binding
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_delete_resource_key_example(self):
"""
delete_resource_key request example
"""
try:
global instanceKeyGuid
# begin-delete_resource_key
response = resource_controller_service.delete_resource_key(
id=instanceKeyGuid
).get_result()
print(json.dumps(response, indent=2))
# end-delete_resource_key
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_delete_resource_alias_example(self):
"""
delete_resource_alias request example
"""
try:
global aliasGuid
# begin-delete_resource_alias
response = resource_controller_service.delete_resource_alias(
id=aliasGuid
).get_result()
print(json.dumps(response, indent=2))
# end-delete_resource_alias
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_lock_resource_instance_example(self):
"""
lock_resource_instance request example
"""
try:
global instanceGuid
# begin-lock_resource_instance
resource_instance = resource_controller_service.lock_resource_instance(
id=instanceGuid
).get_result()
print(json.dumps(resource_instance, indent=2))
# end-lock_resource_instance
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_unlock_resource_instance_example(self):
"""
unlock_resource_instance request example
"""
try:
global instanceGuid
# begin-unlock_resource_instance
resource_instance = resource_controller_service.unlock_resource_instance(
id=instanceGuid
).get_result()
print(json.dumps(resource_instance, indent=2))
# end-unlock_resource_instance
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_delete_resource_instance_example(self):
"""
delete_resource_instance request example
"""
try:
global instanceGuid
# begin-delete_resource_instance
response = resource_controller_service.delete_resource_instance(
id=instanceGuid
).get_result()
print(json.dumps(response, indent=2))
# end-delete_resource_instance
#wait for reclamation object to be created
time.sleep(20)
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_list_reclamations_example(self):
"""
list_reclamations request example
"""
try:
global instanceGuid, reclamationId, accountId
# begin-list_reclamations
reclamations_list = resource_controller_service.list_reclamations(
account_id=accountId
).get_result()
for res in reclamations_list.get('resources'):
if res.get('resource_instance_id') == instanceGuid:
reclamationId = res.get('id')
print(json.dumps(reclamations_list, indent=2))
# end-list_reclamations
except ApiException as e:
pytest.fail(str(e))
@needscredentials
def test_run_reclamation_action_example(self):
"""
run_reclamation_action request example
"""
try:
global reclamationId, reclaimAction
# begin-run_reclamation_action
reclamation = resource_controller_service.run_reclamation_action(
id=reclamationId,
action_name=reclaimAction
).get_result()
print(json.dumps(reclamation, indent=2))
# end-run_reclamation_action
except ApiException as e:
pytest.fail(str(e))
# print(str(e))
# endregion
##############################################################################
# End of Examples for Service: ResourceControllerV2
##############################################################################
|
Python
|
CL
|
14a85488566b7fe4ba7d3f43b19578c2e7111f98d372312c8e373950290b7336
|
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define the Impala target context for Numba and 'builtin' impls"""
from __future__ import absolute_import
import llvm.core as lc
import llvm.passes as lp
import llvm.ee as le
from numba import types as ntypes
from numba import cgutils, lowering
from numba.targets.base import BaseContext
from numba.targets.imputils import Registry, implement, impl_attribute
from impala.udf import stringimpl
from impala.udf.abi import ABIHandling, raise_return_type
from impala.udf.types import (
FunctionContext, AnyVal, BooleanVal, BooleanValType, TinyIntVal,
TinyIntValType, SmallIntVal, SmallIntValType, IntVal, IntValType,
BigIntVal, BigIntValType, FloatVal, FloatValType, DoubleVal, DoubleValType,
StringVal, StringValType)
from impala.udf.impl_utils import (
AnyValStruct, BooleanValStruct, TinyIntValStruct, SmallIntValStruct,
IntValStruct, BigIntValStruct, FloatValStruct, DoubleValStruct,
StringValStruct)
from impala.udf.impl_utils import (
precompiled, _get_is_null, _set_is_null, _conv_numba_struct_to_clang)
registry = Registry()
register_function = registry.register
register_attribute = registry.register_attr
# ctor impls
def _ctor_factory(Struct, Type, *input_args):
@implement(Type, *input_args)
def Val_ctor(context, builder, sig, args):
[x] = args
v = Struct(context, builder)
_set_is_null(builder, v, cgutils.false_bit)
v.val = x
return v._getvalue()
return register_function(Val_ctor)
BooleanVal_ctor = _ctor_factory(BooleanValStruct, BooleanValType, ntypes.int8)
TinyIntVal_ctor = _ctor_factory(TinyIntValStruct, TinyIntValType, ntypes.int8)
SmallIntVal_ctor = _ctor_factory(SmallIntValStruct, SmallIntValType, ntypes.int16)
IntVal_ctor = _ctor_factory(IntValStruct, IntValType, ntypes.int32)
BigIntVal_ctor = _ctor_factory(BigIntValStruct, BigIntValType, ntypes.int64)
FloatVal_ctor = _ctor_factory(FloatValStruct, FloatValType, ntypes.float32)
DoubleVal_ctor = _ctor_factory(DoubleValStruct, DoubleValType, ntypes.float64)
@register_function
@implement(StringValType, ntypes.string)
def StringVal_ctor(context, builder, sig, args):
"""StringVal(ntypes.string)"""
[x] = args
iv = StringValStruct(context, builder)
_set_is_null(builder, iv, cgutils.false_bit)
fndesc = lowering.ExternalFunctionDescriptor(
'strlen', ntypes.uintp, [ntypes.CPointer(ntypes.char)])
func = context.declare_external_function(
cgutils.get_module(builder), fndesc)
strlen_x = context.call_external_function(
builder, func, fndesc.argtypes, [x])
len_x = builder.trunc(strlen_x, lc.Type.int(32))
iv.len = len_x
iv.ptr = x
return iv._getvalue()
# *Val attributes
def _is_null_attr_factory(Struct, Val):
@impl_attribute(Val, "is_null", ntypes.boolean)
def Val_is_null(context, builder, typ, value):
v = Struct(context, builder, value=value)
is_null = _get_is_null(builder, v)
return is_null
return register_attribute(Val_is_null)
def _val_attr_factory(Struct, Val, retty):
@impl_attribute(Val, "val", retty)
def Val_val(context, builder, typ, value):
v = Struct(context, builder, value=value)
return v.val
return register_attribute(Val_val)
# *Val.is_null
BooleanVal_is_null = _is_null_attr_factory(BooleanValStruct, BooleanVal)
TinyIntVal_is_null = _is_null_attr_factory(TinyIntValStruct, TinyIntVal)
SmallIntVal_is_null = _is_null_attr_factory(SmallIntValStruct, SmallIntVal)
IntVal_is_null = _is_null_attr_factory(IntValStruct, IntVal)
BigIntVal_is_null = _is_null_attr_factory(BigIntValStruct, BigIntVal)
FloatVal_is_null = _is_null_attr_factory(FloatValStruct, FloatVal)
DoubleVal_is_null = _is_null_attr_factory(DoubleValStruct, DoubleVal)
StringVal_is_null = _is_null_attr_factory(StringValStruct, StringVal)
# *Val.val
BooleanVal_val = _val_attr_factory(BooleanValStruct, BooleanVal, ntypes.int8)
TinyIntVal_val = _val_attr_factory(TinyIntValStruct, TinyIntVal, ntypes.int8)
SmallIntVal_val = _val_attr_factory(SmallIntValStruct, SmallIntVal, ntypes.int16)
IntVal_val = _val_attr_factory(IntValStruct, IntVal, ntypes.int32)
BigIntVal_val = _val_attr_factory(BigIntValStruct, BigIntVal, ntypes.int64)
FloatVal_val = _val_attr_factory(FloatValStruct, FloatVal, ntypes.float32)
DoubleVal_val = _val_attr_factory(DoubleValStruct, DoubleVal, ntypes.float64)
@register_attribute
@impl_attribute(StringVal, "len", ntypes.int32)
def StringVal_len(context, builder, typ, value):
"""StringVal::len"""
iv = StringValStruct(context, builder, value=value)
return iv.len
@register_attribute
@impl_attribute(StringVal, "ptr", ntypes.CPointer(ntypes.uint8))
def StringVal_ptr(context, builder, typ, value):
"""StringVal::ptr"""
iv = StringValStruct(context, builder, value=value)
return iv.ptr
# impl "builtins"
@register_function
@implement('is', AnyVal, ntypes.none)
def anyval_is_none_impl(context, builder, sig, args):
[x, y] = args
val = AnyValStruct(context, builder, value=x)
return builder.trunc(val.is_null, lc.Type.int(1))
def starval_is_none_impl(context, builder, sig, args):
[x, y] = args
x = builder.extract_value(x, 0)
val = AnyValStruct(context, builder, value=x)
return builder.trunc(val.is_null, lc.Type.int(1))
register_function(implement('is', BooleanVal, ntypes.none)(starval_is_none_impl))
register_function(implement('is', TinyIntVal, ntypes.none)(starval_is_none_impl))
register_function(implement('is', SmallIntVal, ntypes.none)(starval_is_none_impl))
register_function(implement('is', IntVal, ntypes.none)(starval_is_none_impl))
register_function(implement('is', BigIntVal, ntypes.none)(starval_is_none_impl))
register_function(implement('is', FloatVal, ntypes.none)(starval_is_none_impl))
register_function(implement('is', DoubleVal, ntypes.none)(starval_is_none_impl))
@register_function
@implement(ntypes.len_type, StringVal)
def len_stringval_impl(context, builder, sig, args):
[s] = args
val = StringValStruct(context, builder, value=s)
return val.len
@register_function
@implement("==", ntypes.CPointer(ntypes.uint8), ntypes.CPointer(ntypes.uint8))
def eq_ptr_impl(context, builder, sig, args):
[p1, p2] = args
return builder.icmp(lc.ICMP_EQ, p1, p2)
@register_function
@implement("==", StringVal, StringVal)
def eq_stringval(context, builder, sig, args):
module = cgutils.get_module(builder)
precomp_func = context._get_precompiled_function("EqStringValImpl")
func = module.get_or_insert_function(
precomp_func.type.pointee, precomp_func.name)
[s1, s2] = args
cs1 = _conv_numba_struct_to_clang(builder, s1, func.args[0].type)
cs2 = _conv_numba_struct_to_clang(builder, s2, func.args[1].type)
result = builder.call(func, [cs1, cs2])
return result # ret bool so no need to raise type
@register_function
@implement("!=", StringVal, StringVal)
def neq_stringval(context, builder, sig, args):
eq = eq_stringval(context, builder, sig, args)
neq = builder.xor(lc.Constant.int(lc.Type.int(1), 1), eq)
return neq
@register_function
@implement("getitem", StringVal, ntypes.intc)
def getitem_stringval(context, builder, sig, args):
module = cgutils.get_module(builder)
precomp_func = context._get_precompiled_function("GetItemStringValImpl")
func = module.get_or_insert_function(
precomp_func.type.pointee, precomp_func.name)
[s, i] = args
cs = _conv_numba_struct_to_clang(builder, s, func.args[0].type)
result = builder.call(func, [cs, i])
return raise_return_type(context, builder, StringVal, result)
@register_function
@implement("+", StringVal, StringVal)
def add_stringval(context, builder, sig, args):
module = cgutils.get_module(builder)
precomp_func = context._get_precompiled_function("AddStringValImpl")
func = module.get_or_insert_function(
precomp_func.type.pointee, precomp_func.name)
fnctx_arg = context.get_arguments(cgutils.get_function(builder))[0]
cfnctx_arg = builder.bitcast(fnctx_arg, func.args[0].type)
[s1, s2] = args
cs1 = _conv_numba_struct_to_clang(builder, s1, func.args[1].type)
cs2 = _conv_numba_struct_to_clang(builder, s2, func.args[2].type)
result = builder.call(func, [cfnctx_arg, cs1, cs2])
return raise_return_type(context, builder, StringVal, result)
LLVM_TYPE = {
AnyVal: precompiled.get_type_named("struct.impala_udf::AnyVal"),
BooleanVal: precompiled.get_type_named("struct.impala_udf::BooleanVal"),
TinyIntVal: precompiled.get_type_named("struct.impala_udf::TinyIntVal"),
SmallIntVal: precompiled.get_type_named("struct.impala_udf::SmallIntVal"),
IntVal: precompiled.get_type_named("struct.impala_udf::IntVal"),
BigIntVal: precompiled.get_type_named("struct.impala_udf::BigIntVal"),
FloatVal: precompiled.get_type_named("struct.impala_udf::FloatVal"),
DoubleVal: precompiled.get_type_named("struct.impala_udf::DoubleVal"),
StringVal: precompiled.get_type_named("struct.impala_udf::StringVal"),
}
TYPE_LAYOUT = {
AnyVal: AnyValStruct,
BooleanVal: BooleanValStruct,
TinyIntVal: TinyIntValStruct,
SmallIntVal: SmallIntValStruct,
IntVal: IntValStruct,
BigIntVal: BigIntValStruct,
FloatVal: FloatValStruct,
DoubleVal: DoubleValStruct,
StringVal: StringValStruct,
}
class ImpalaTargetContext(BaseContext):
_impala_types = (AnyVal, BooleanVal, TinyIntVal, SmallIntVal, IntVal,
BigIntVal, FloatVal, DoubleVal, StringVal)
def init(self):
self.tm = le.TargetMachine.new()
# insert registered impls
self.insert_func_defn(registry.functions)
self.insert_attr_defn(registry.attributes)
self.insert_func_defn(stringimpl.registry.functions)
self.insert_attr_defn(stringimpl.registry.attributes)
self.optimizer = self.build_pass_manager()
# once per context
self._fnctxtype = precompiled.get_type_named(
"class.impala_udf::FunctionContext")
def _get_precompiled_function(self, name):
fns = [fn for fn in precompiled.functions if name in fn.name]
assert len(fns) == 1
return fns[0]
def cast(self, builder, val, fromty, toty):
if fromty not in self._impala_types and toty not in self._impala_types:
return super(ImpalaTargetContext, self).cast(
builder, val, fromty, toty)
if fromty == toty:
return val
# handle NULLs and Nones
if fromty == ntypes.none and toty in self._impala_types:
iv = TYPE_LAYOUT[toty](self, builder)
_set_is_null(builder, iv, cgutils.true_bit)
return iv._getvalue()
if fromty in self._impala_types and toty == AnyVal:
iv1 = TYPE_LAYOUT[fromty](self, builder, value=val)
is_null = _get_is_null(builder, iv1)
iv2 = AnyValStruct(self, builder)
# this is equiv to _set_is_null, but changes the GEP bc of AnyVal's
# structure
byte = builder.zext(is_null, lc.Type.int(8))
builder.store(byte, builder.gep(
iv2._getpointer(),
[lc.Constant.int(lc.Type.int(32), 0)] * 2, inbounds=True))
return iv2._getvalue()
if fromty == BooleanVal:
v = BooleanValStruct(self, builder, val)
return self.cast(builder, v.val, ntypes.boolean, toty)
if fromty == TinyIntVal:
v = TinyIntValStruct(self, builder, val)
return self.cast(builder, v.val, ntypes.int8, toty)
if fromty == SmallIntVal:
v = SmallIntValStruct(self, builder, val)
return self.cast(builder, v.val, ntypes.int16, toty)
if fromty == IntVal:
v = IntValStruct(self, builder, val)
return self.cast(builder, v.val, ntypes.int32, toty)
if fromty == BigIntVal:
v = BigIntValStruct(self, builder, val)
return self.cast(builder, v.val, ntypes.int64, toty)
if fromty == FloatVal:
v = FloatValStruct(self, builder, val)
return self.cast(builder, v.val, ntypes.float32, toty)
if fromty == DoubleVal:
v = DoubleValStruct(self, builder, val)
return self.cast(builder, v.val, ntypes.float64, toty)
# no way fromty is a *Val starting here
if toty == BooleanVal:
val = super(ImpalaTargetContext, self).cast(
builder, val, fromty, ntypes.int8)
return BooleanVal_ctor(self, builder, None, [val])
if toty == TinyIntVal:
val = super(ImpalaTargetContext, self).cast(
builder, val, fromty, ntypes.int8)
return TinyIntVal_ctor(self, builder, None, [val])
if toty == SmallIntVal:
val = super(ImpalaTargetContext, self).cast(
builder, val, fromty, ntypes.int16)
return SmallIntVal_ctor(self, builder, None, [val])
if toty == IntVal:
val = super(ImpalaTargetContext, self).cast(
builder, val, fromty, ntypes.int32)
return IntVal_ctor(self, builder, None, [val])
if toty == BigIntVal:
val = super(ImpalaTargetContext, self).cast(
builder, val, fromty, ntypes.int64)
return BigIntVal_ctor(self, builder, None, [val])
if toty == FloatVal:
val = super(ImpalaTargetContext, self).cast(
builder, val, fromty, ntypes.float32)
return FloatVal_ctor(self, builder, None, [val])
if toty == DoubleVal:
val = super(ImpalaTargetContext, self).cast(
builder, val, fromty, ntypes.float64)
return DoubleVal_ctor(self, builder, None, [val])
if toty == StringVal:
return StringVal_ctor(self, builder, None, [val])
return super(ImpalaTargetContext, self).cast(
builder, val, fromty, toty)
def get_constant_string(self, builder, ty, val):
assert ty == ntypes.string
literal = lc.Constant.stringz(val)
gv = cgutils.get_module(builder).add_global_variable(
literal.type, 'str_literal')
gv.linkage = lc.LINKAGE_PRIVATE
gv.initializer = literal
gv.global_constant = True
# gep gets pointer to first element of the constant byte array
return gv.gep([lc.Constant.int(lc.Type.int(32), 0)] * 2)
def get_constant_struct(self, builder, ty, val):
# override for converting literals to *Vals, incl. None
if ty in self._impala_types and val is None:
iv = TYPE_LAYOUT[ty](self, builder)
_set_is_null(builder, iv, cgutils.true_bit)
return iv._getvalue()
elif ty == BooleanVal:
const = lc.Constant.int(lc.Type.int(8), val)
return BooleanVal_ctor(self, builder, None, [const])
elif ty == TinyIntVal:
const = lc.Constant.int(lc.Type.int(8), val)
return TinyIntVal_ctor(self, builder, None, [const])
elif ty == SmallIntVal:
const = lc.Constant.int(lc.Type.int(16), val)
return SmallIntVal_ctor(self, builder, None, [const])
elif ty == IntVal:
const = lc.Constant.int(lc.Type.int(32), val)
return IntVal_ctor(self, builder, None, [const])
elif ty == BigIntVal:
const = lc.Constant.int(lc.Type.int(64), val)
return BigIntVal_ctor(self, builder, None, [const])
elif ty == FloatVal:
const = lc.Constant.real(lc.Type.float(), val)
return FloatVal_ctor(self, builder, None, [const])
elif ty == DoubleVal:
const = lc.Constant.real(lc.Type.double(), val)
return DoubleVal_ctor(self, builder, None, [const])
elif ty == StringVal:
iv = StringValStruct(self, builder)
_set_is_null(builder, iv, cgutils.false_bit)
iv.len = lc.Constant.int(lc.Type.int(32), len(val))
iv.ptr = self.get_constant_string(builder, ntypes.string, val)
return iv._getvalue()
else:
return super(ImpalaTargetContext, self).get_constant_struct(
builder, ty, val)
def get_struct_type(self, struct):
if hasattr(struct, '_name'):
# our custom named structs
return precompiled.get_type_named(struct._name)
else:
return super(ImpalaTargetContext, self).get_struct_type(struct)
def get_data_type(self, ty):
if ty in LLVM_TYPE:
return LLVM_TYPE[ty]
elif ty == FunctionContext:
return lc.Type.pointer(self._fnctxtype)
else:
return super(ImpalaTargetContext, self).get_data_type(ty)
def get_array(self, builder, itemvals, itemtys):
# only handle uniform type
assert all(x == itemtys[0] for x in itemtys)
ty = itemtys[0]
if ty not in self._impala_types:
raise NotImplementedError(
"Arrays of non-Impala types not supported")
def build_pass_manager(self):
opt = 0 # let Impala optimize
# opt = 3 # optimize ourselves
pms = lp.build_pass_managers(
tm=self.tm, opt=opt, loop_vectorize=True, fpm=False)
return pms.pm
def finalize(self, func, restype, argtypes):
func.verify()
func.linkage = lc.LINKAGE_INTERNAL
module = func.module
# Generate wrapper to adapt into Impala ABI
abi = ABIHandling(self, func, restype, argtypes)
wrapper = abi.build_wrapper("numba_udf." + func.name)
module.verify()
self.optimizer.run(module)
return wrapper
|
Python
|
CL
|
8996bbc748be0d2c0c673562cad61137e91dda8c9dae1abc3f810e988e5423c4
|
#!/usr/bin/env python
"""This script configures GigabitEthernet2 interfaces on network devices.
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
from netconf_functions import check_ip, set_ip
import ciscosparkapi
# Get the absolute path for the directory where this file is located "here"
here = os.path.abspath(os.path.dirname(__file__))
# Get the absolute path for the project / repository root
project_root = os.path.abspath(os.path.join(here, "../.."))
# Extend the system path to include the project root and import the env files
sys.path.insert(0, project_root)
import env_lab # noqa
import env_user # noqa
# Create a Cisco Spark object
spark = ciscosparkapi.CiscoSparkAPI(access_token=env_user.SPARK_ACCESS_TOKEN)
# Create message list
messages = [
"I've completed the Intro to Model Driven Programmability Mission!"
]
# Create a list of devices to query
devices = [
{"conn": env_lab.IOS_XE_1, "ip": "172.16.255.1", "prefix": "24"},
{"conn": env_lab.IOS_XE_2, "ip": "172.16.255.2", "prefix": "24"},
]
# Step 1: Query the devices for the current interface configuration.
print("Checking the current IP configuration on GigabitEthernet2 on devices")
# Query both devices for current interface configuration
for device in devices:
check_ip(device)
# Step 2: Configure the IP addresses for GigabitEthernet2 on the devices
print("Attempting to configure GigabitEthernet2 IP addressing")
# Configure GigabitEthernet2 IP on each device
for device in devices:
set_ip(device)
# Step 3: Print updated IP addresses on devices
print(
"Re-Checking the current IP configuration on GigabitEthernet2 on devices"
)
# Query both devices for current interface configuration
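# check_ip() is assumed here to return a (device_name, ip_address) tuple,
# based on how result[0] and result[1] are used below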
for device in devices:
result = check_ip(device)
messages.append("Device {} IP set to {}".format(result[0], result[1]))
# Post message to Spark
spark.messages.create(roomId=env_user.SPARK_ROOM_ID, text="\n".join(messages))
|
Python
|
CL
|
c7d6f89bab510f9332522b3a628cb1e5739eb65211c5993374edc3cec56f04b4
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-docstring,invalid-name,no-member
# pylint: disable=attribute-defined-outside-init
import copy
from qiskit.quantum_info.synthesis import OneQubitEulerDecomposer
from qiskit import QuantumCircuit, ClassicalRegister, QuantumRegister
try:
from qiskit.compiler import transpile
TRANSPILER_SEED_KEYWORD = 'seed_transpiler'
except ImportError:
from qiskit.transpiler import transpile
TRANSPILER_SEED_KEYWORD = 'seed_mapper'
try:
from qiskit.quantum_info.random import random_unitary
HAS_RANDOM_UNITARY = True
except ImportError:
from qiskit.tools.qi.qi import random_unitary_matrix
HAS_RANDOM_UNITARY = False
# Make a random circuit on a ring
def make_circuit_ring(nq, depth, seed):
    assert nq % 2 == 0  # for now, size of ring must be even
# Create a Quantum Register
q = QuantumRegister(nq)
# Create a Classical Register
c = ClassicalRegister(nq)
# Create a Quantum Circuit
qc = QuantumCircuit(q, c)
offset = 1
decomposer = OneQubitEulerDecomposer()
# initial round of random single-qubit unitaries
for i in range(nq):
qc.h(q[i])
for j in range(depth):
for i in range(int(nq / 2)): # round of CNOTS
k = i * 2 + offset + j % 2 # j%2 makes alternating rounds overlap
qc.cx(q[k % nq], q[(k + 1) % nq])
for i in range(nq): # round of single-qubit unitaries
if HAS_RANDOM_UNITARY:
u = random_unitary(2, seed).data
else:
u = random_unitary_matrix(2) # pylint: disable=used-before-assignment # noqa
angles = decomposer.angles(u)
qc.u3(angles[0], angles[1], angles[2], q[i])
# insert the final measurements
qcm = copy.deepcopy(qc)
for i in range(nq):
qcm.measure(q[i], c[i])
return [qc, qcm, nq]
class BenchRandomCircuitHex:
params = [2 * i for i in range(2, 8)]
param_names = ['n_qubits']
version = 3
def setup(self, n):
depth = 2 * n
self.seed = 0
self.circuit = make_circuit_ring(n, depth, self.seed)[0]
def time_ibmq_backend_transpile(self, _):
# Run with ibmq_16_melbourne configuration
coupling_map = [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4],
[5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10],
[11, 3], [11, 10], [11, 12], [12, 2], [13, 1],
[13, 12]]
transpile(self.circuit,
basis_gates=['u1', 'u2', 'u3', 'cx', 'id'],
coupling_map=coupling_map,
**{TRANSPILER_SEED_KEYWORD: self.seed})
def track_depth_ibmq_backend_transpile(self, _):
# Run with ibmq_16_melbourne configuration
coupling_map = [[1, 0], [1, 2], [2, 3], [4, 3], [4, 10], [5, 4],
[5, 6], [5, 9], [6, 8], [7, 8], [9, 8], [9, 10],
[11, 3], [11, 10], [11, 12], [12, 2], [13, 1],
[13, 12]]
return transpile(self.circuit,
basis_gates=['u1', 'u2', 'u3', 'cx', 'id'],
coupling_map=coupling_map,
**{TRANSPILER_SEED_KEYWORD: self.seed}).depth()
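# Minimal manual-run sketch: these Bench* classes are normally driven by the asv
# benchmark harness, so calling them directly like this is only for illustration.
#   bench = BenchRandomCircuitHex()
#   bench.setup(4)                                   # 4 qubits, depth 8
#   print(bench.track_depth_ibmq_backend_transpile(None))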
|
Python
|
CL
|
d919f7ff237c0a5348a9f219da2a6fc3356e291655dfdc48af30b4ce5c2b1392
|
from decimal import Decimal
import os
import random
import re
import sys
DAMPING = 0.85
SAMPLES = 10000
def main():
if len(sys.argv) != 2:
sys.exit("Usage: python pagerank.py corpus")
corpus = crawl(sys.argv[1])
ranks = sample_pagerank(corpus, DAMPING, SAMPLES)
print(f"PageRank Results from Sampling (n = {SAMPLES})")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
ranks = iterate_pagerank(corpus, DAMPING)
print(f"PageRank Results from Iteration")
for page in sorted(ranks):
print(f" {page}: {ranks[page]:.4f}")
def crawl(directory):
"""
Parse a directory of HTML pages and check for links to other pages.
Return a dictionary where each key is a page, and values are
a list of all other pages in the corpus that are linked to by the page.
"""
pages = dict()
# Extract all links from HTML files
for filename in os.listdir(directory):
if not filename.endswith(".html"):
continue
with open(os.path.join(directory, filename)) as f:
contents = f.read()
links = re.findall(r"<a\s+(?:[^>]*?)href=\"([^\"]*)\"", contents)
pages[filename] = set(links) - {filename}
# Only include links to other pages in the corpus
for filename in pages:
pages[filename] = set(
link for link in pages[filename]
if link in pages
)
return pages
def transition_model(corpus, page, damping_factor):
"""
Return a probability distribution over which page to visit next,
given a current page.
    With probability `damping_factor`, choose one of the links on `page`
    at random. With probability `1 - damping_factor`, choose any page in
    the corpus at random.
"""
probability_distribution = dict()
corpus_size = len(corpus)
links_number = len(corpus[page])
if links_number == 0:
damping_factor = 0
for elem in corpus:
value = (Decimal('1') - Decimal(f'{damping_factor}')) / corpus_size
if elem in corpus[page]:
value += Decimal(f'{damping_factor}') / links_number
probability_distribution[elem] = float(value)
return probability_distribution
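# Illustrative check with a toy three-page corpus (hypothetical data):
#   transition_model({"1": {"2"}, "2": {"1", "3"}, "3": set()}, "1", 0.85)
# Every page receives (1 - 0.85) / 3 = 0.05, and "2" additionally receives
# 0.85 / 1, so the result is {"1": 0.05, "2": 0.90, "3": 0.05}.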
def sample_pagerank(corpus, damping_factor, n):
"""
Return PageRank values for each page by sampling `n` pages
according to transition model, starting with a page at random.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
page_rank = {key: 0 for key in corpus}
# Choose first page at random
current_page = random.choice(list(corpus))
page_rank[current_page] = 1
# Continue until n samples are generated
i = 1
while i < n:
prob_distribution = transition_model(corpus, current_page,
damping_factor)
current_page = random.choices(list(prob_distribution),
weights=list(prob_distribution.values()),
k=1)[0]
page_rank[current_page] += 1
i += 1
# Calculate PageRank values
for key in page_rank:
page_rank[key] /= n
return page_rank
def iterate_pagerank(corpus, damping_factor):
"""
Return PageRank values for each page by iteratively updating
PageRank values until convergence.
Return a dictionary where keys are page names, and values are
their estimated PageRank value (a value between 0 and 1). All
PageRank values should sum to 1.
"""
# Calculate initial PageRank values
corpus_size = len(corpus)
page_rank = {key: 1 / corpus_size for key in corpus}
# Replace values for pages that have no links at all
edited_corpus = no_links_page(corpus)
# Get dict mapping page with set of all pages that link to it
links_to_pages = links_to_page(edited_corpus)
# Continue to calculate new PageRank values
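    # Update rule: PR(p) = (1 - d) / N + d * sum(PR(i) / NumLinks(i)),
    # summed over every page i that links to p.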
while True:
result = dict()
for page in page_rank:
pr = (1 - damping_factor) / corpus_size
link_sum = 0
for el in links_to_pages[page]:
link_sum += page_rank[el] / len(edited_corpus[el])
pr += damping_factor * link_sum
result[page] = pr
# Break if no PageRank value changes by more than 0.001
# since the last iteration
if all([abs(page_rank[p] - result[p]) <= 0.001 for p in page_rank]):
break
# Save the last iteration results
page_rank = result
return result
def no_links_page(corpus):
"""
Return edited corpus dictionary where all pages that had no
links at all now have one link to every page in the corpus.
"""
return {key: (v if v else set(corpus)) for key, v in corpus.items()}
def links_to_page(corpus):
"""
Return dictionary mapping page name to a set of all pages
possessing link to that page.
"""
result = dict()
for page in corpus:
links = set()
for key in corpus:
if page in corpus[key]:
links.add(key)
result[page] = links
return result
if __name__ == "__main__":
main()
|
Python
|
CL
|
4cef24119b83ded6a238d06468adb14043915108d082ae98da14b4edf926f6ea
|
"""Config for TD3 on Reacher-v2.
- Author: Kyunghwan Kim
- Contact: kh.kim@medipixel.io
"""
agent = dict(
type="TD3Agent",
hyper_params=dict(
gamma=0.95,
tau=5e-3,
buffer_size=int(1e6),
batch_size=100,
initial_random_action=int(1e4),
policy_update_freq=2,
),
learner_cfg=dict(
type="TD3Learner",
backbone=dict(actor=dict(), critic=dict()),
head=dict(
actor=dict(
type="MLP",
configs=dict(hidden_sizes=[400, 300], output_activation="tanh",),
),
critic=dict(
type="MLP",
configs=dict(
hidden_sizes=[400, 300],
output_size=1,
output_activation="identity",
),
),
),
optim_cfg=dict(lr_actor=1e-3, lr_critic=1e-3, weight_decay=0.0),
),
noise_cfg=dict(
exploration_noise=0.1, target_policy_noise=0.2, target_policy_noise_clip=0.5
),
)
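# Hedged usage sketch: in medipixel's rl_algorithms, config dicts like `agent`
# are consumed by a registry-style builder. The entry point below is assumed,
# not verified against the repo:
#   from rl_algorithms import build_agent   # assumed import path
#   td3 = build_agent(agent, env_name="Reacher-v2")  # hypothetical signature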
|
Python
|
CL
|
520c746ffbcac880d096c8199bf1a0c81cfe62d9b381e5c34a02e32305b7d3e6
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 23:15:02 2019
@author: ljp
"""
import numpy as np
from scipy.stats import multivariate_normal
import random
from scipy.ndimage import convolve
##### Function that creates plans from parameters #####
def unet_create_plans(number_of_plans, plan_size, num_mean, num_std, info_neuron, coord_threshold):
"""Function that creates stack of plans from parameters.
%%%%% INPUTS %%%%%
- number_of_plans: final number of stacked plans in output (integer)
- plan_size: size of each plan in pixels (vector of 2 integers)
- num_mean: mean number of neurons per plan (integer)
- num_std: standard deviation of number of neurons per plan (integer)
- info_neuron: series of 8 floats describing each neuron, comprising:
- neu_mean: mean intensity
- neu_std: standard deviation of intensity
        - cor_mean: mean covariance (inclination)
- cor_std: standard deviation of covariance
- xvar_mean: mean variance in the x-axis direction
- xvar_std: standard deviation of the variance in the x-axis direction
- yvar_mean: mean variance in the y-axis direction
- yvar_std: standard deviation of the variance in the y-axis direction
%%%%% OUTPUTS %%%%%
- plans_temp: plans with neurons stacked along first dimension ((number_of_plans, plan_size[0], plan_size[1]) matrix)
- coord_temp: coordinates of all neurons, with associated plan ((number_of_plans, plan_size[0], plan_size[1]) matrix)
"""
    # Retrieving information on neurons
neu_mean, neu_std, cor_mean, cor_std, xvar_mean, xvar_std, yvar_mean, yvar_std = info_neuron
# Creating initial set of plans
plans_temp = np.zeros((number_of_plans, plan_size[0], plan_size[1]))
# Defining coordinates vector
coord_temp = np.zeros((number_of_plans, plan_size[0], plan_size[1]))
# Creating Xfield and Yfield vectors
Xfield, Yfield = np.mgrid[-6:7, -6:7]
Xfield = Xfield.flatten()[:, np.newaxis]
Yfield = Yfield.flatten()[:, np.newaxis]
# Defining number of neurons per plan
numfin = np.round_(num_mean + num_std*np.random.randn(number_of_plans))
# Loop on each plan
for i in range(number_of_plans):
# Loop on each potential neuron in plan i
for j in range(int(numfin[i])):
# Finding available space for neuron
            numround = 0
            numlim = 50
            while numround < numlim:  # the 'break' below exits once free space is found
randx = int(np.round_(np.random.rand() * (plan_size[0] - 15)) + 7)
randy = int(np.round_(np.random.rand() * (plan_size[1] - 15)) + 7)
anafield = plans_temp[i, (randx-7):(randx+6), (randy-7):(randy+6)]
if np.count_nonzero(anafield) == 0:
break
numround += 1
if numround == numlim:
continue
# Defining neuron's information
matcorval = cor_mean + cor_std * np.random.randn()
matrixstd = np.array([[xvar_mean + xvar_std*np.random.randn(), matcorval],
[matcorval, yvar_mean + yvar_std*np.random.randn()]])
if np.linalg.det(matrixstd) < 0:
continue
# Making matrix with neuron's information
numval = (neu_mean + neu_std*np.random.randn()) * np.sqrt((2*np.pi)**2 * np.linalg.det(matrixstd))
mvn = multivariate_normal(mean = np.array([0, 0]), cov = matrixstd)
valexp = numval * mvn.pdf(np.hstack((Xfield, Yfield)))
if numval <= 0:
continue
# Replacing in final plan
plan_temp = valexp.reshape(13, 13)
plans_temp[i, (randx-7):(randx+6), (randy-7):(randy+6)] = plan_temp
# Adding neuron detection in coord_temp
coord_temp[i, (randx-7):(randx+6), (randy-7):(randy+6)] = 1 * (plan_temp > neu_mean/coord_threshold)
return plans_temp, coord_temp
##### Function that stacks plans from create_plans function #####
def unet_stack_plans(nstacks, plans_dataset, coord_dataset, cropped_size, stack_mean, stack_std, noise_mean, noise_std):
"""Function that randomly stack plans given as inputs.
%%%%% INPUTS %%%%%
- nstacks: final number of stacked plans (integer)
- plans_dataset: plans dataset obtained from unet_create_plans (3d matrix)
- coord_dataset: coordinates dataset obtained from unet_create_plans (3d matrix)
- cropped_size: final size of plans after cropped (integer)
- stack_mean: mean number of plans stacked for one final plan (integer)
- stack_std: standard deviation of number of plans stacked for one final plan (integer)
- noise_mean: mean of noise plan added to stacked plans (float)
- noise_std: standard deviation of noise plan added to stacked plans (float)
%%%%% OUTPUTS %%%%%
- plans_stack: final plans stacked along first dimension ((nstacks, cropped_size, cropped_size) matrix)
    - coord_stack: coordinates of all neurons, with associated final plan ((nstacks, cropped_size, cropped_size) matrix)
"""
# First defining final matrices
plans_stack = np.zeros((nstacks, cropped_size, cropped_size))
coord_stack = np.zeros((nstacks, cropped_size, cropped_size))
# Number of stacks in plans_dataset
nstacks_old = plans_dataset.shape[0]
# Parameters for cropping
xcrop = plans_dataset.shape[1] - cropped_size
ycrop = plans_dataset.shape[2] - cropped_size
# Filter for noise, will be convolved to have continuous noise
filter_shape = 5
filter_noise = np.zeros((filter_shape, filter_shape))
for i in range(filter_shape):
for j in range(filter_shape):
radius_temp = np.sqrt((i-(filter_shape-1)/2)**2 + (j-(filter_shape-1)/2)**2)
if radius_temp <= 1:
filter_noise[i, j] = 1
elif radius_temp <= 2:
filter_noise[i, j] = 0.5
# Launching loop
for i in range(nstacks):
# Selecting number of stacks
stack_f = int(np.round_(stack_mean + stack_std*np.random.randn()))
if stack_f <= 0:
stack_f = 1
# Creating temporary plan
plans_temp = np.zeros((stack_f+1, cropped_size, cropped_size))
        # Creating a temporary coordinates matrix
coords_temp = np.zeros((stack_f, cropped_size, cropped_size))
        # Choosing plans to use
plan_use = np.floor(nstacks_old*np.random.rand(stack_f)).astype(int)
# Second for loop for each plan
for j in range(stack_f):
# Loading plan and coordinates
plan_temp = plans_dataset[plan_use[j], :, :]
coord_temp = coord_dataset[plan_use[j], :, :]
# Cropping
            xcrop_init = random.randint(0, xcrop)
            ycrop_init = random.randint(0, ycrop)
plan_temp = plan_temp[xcrop_init:(xcrop_init+cropped_size), ycrop_init:(ycrop_init+cropped_size)]
coord_temp = coord_temp[xcrop_init:(xcrop_init+cropped_size), ycrop_init:(ycrop_init+cropped_size)]
# Rotating
            rotnum = random.randint(0, 3)
for k in range(rotnum):
plan_temp = plan_temp.T[::-1, :]
coord_temp = coord_temp.T[::-1, :]
# Adding to temporary stack
plans_temp[j, :, :] = plan_temp
coords_temp[j, :, :] = coord_temp
# Adding noise plan
plan_noise = noise_mean + np.abs(noise_std*np.random.randn(cropped_size, cropped_size))
plans_temp[-1, :, :] = convolve(plan_noise, filter_noise, mode="constant") # convolution to filter
# Merging stacks into stack_plan
stack_plan_temp = np.max(plans_temp, axis=0)
stack_plan_temp[stack_plan_temp < 0] = 0
plans_stack[i, :, :] = stack_plan_temp
        # Resolve overlapping detections: label each pixel by its brightest plane
argmax_plan_temp = np.argmax(plans_temp, axis=0) + 1
coord_stack[i, :, :] = argmax_plan_temp * np.max(coords_temp, axis=0)
# Returning values
return plans_stack, coord_stack
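# Minimal usage sketch (all parameter values below are illustrative assumptions):
#   info_neuron = (100.0, 10.0, 0.0, 0.3, 2.0, 0.3, 2.0, 0.3)
#   plans, coords = unet_create_plans(4, (64, 64), 20, 3, info_neuron, 4)
#   stacks, stack_coords = unet_stack_plans(2, plans, coords, 48, 3, 1, 0.1, 0.05)
#   # stacks.shape == (2, 48, 48); stack_coords labels each detected pixel by
#   # the index of its brightest contributing plane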
|
Python
|
CL
|
b892d7febe320e44727b98b2d342d882eaac56c721e6c0aff60854753d9ebdb5
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import os
import lda
import time
### This script runs regular LDA on a patient record training set (90% of the
### original data). Writes out the herb counts, symptom counts, code list (for
### mapping symptoms/herbs to integers), and the word distributions for each
### topic. The number of topics will match the number of unique diseases.
### Run time for 5000 iterations: 3.5 hours.
date_format = '%Y-%m-%d'
def generate_folders():
'''
    Creates the results and data directories.
'''
res_dir = './results/'
if not os.path.exists(res_dir):
os.makedirs(res_dir)
count_dir = './data/count_dictionaries/'
if not os.path.exists(count_dir):
os.makedirs(count_dir)
code_list_dir = './data/code_lists/'
if not os.path.exists(code_list_dir):
os.makedirs(code_list_dir)
lda_res_dir = './results/lda_word_distributions/'
if not os.path.exists(lda_res_dir):
os.makedirs(lda_res_dir)
def get_patient_dct(filename):
'''
    Returns a dictionary and the set of unique diseases.
    Key: (name, date of birth) -> (str, str)
    Value: dictionary mapping each visit date to a tuple containing the
    diseases, symptoms, and herbs recorded at that visit.
'''
# Keep track of the unique set of diseases so we know n_topics.
patient_dct, disease_set = {}, set([])
f = open(filename, 'r')
for i, line in enumerate(f):
diseases, name, dob, visit_date, symptoms, herbs = line.split('\t')
# Always ends with a colon, so the last element of the split will be
# the empty string.
disease_list = diseases.split(':')[:-1]
disease_set = disease_set.union(disease_list)
visit_date = visit_date.split(',')[1][:len('xxxx-xx-xx')]
# Format the diagnosis date.
visit_date = datetime.datetime.strptime(visit_date, date_format)
# Format symptom and herb lists and remove duplicates.
symptom_list = list(set(symptoms.split(':')[:-1]))
herb_list = list(set(herbs.split(':')[:-1]))
# Name, date of birth pair uniquely identifies a patient.
key = (name, dob)
# Initialize the patient's visit dictionary.
if key not in patient_dct:
patient_dct[key] = {}
# If multiple visits in one day, add on a second to the visit.
while visit_date in patient_dct[key]:
visit_date += datetime.timedelta(0,1)
# list(set()) removes duplicates.
patient_dct[key][visit_date] = (disease_list, symptom_list, herb_list)
f.close()
return patient_dct, disease_set
def get_symptom_and_herb_counts(patient_dct, run_num):
'''
    Given the patient dictionary, counts the symptom and herb occurrences
    across all patient visits. Writes the counts out to file.
Returns the list of unique medical codes.
'''
herb_count_dct, symptom_count_dct = {}, {}
for key in patient_dct:
visit_dct = patient_dct[key]
for date in visit_dct:
disease_list, symptom_list, herb_list = visit_dct[date]
# Update the counts of each symptom and herb.
for symptom in symptom_list:
if symptom not in symptom_count_dct:
symptom_count_dct[symptom] = 0
symptom_count_dct[symptom] += 1
for herb in herb_list:
if herb not in herb_count_dct:
herb_count_dct[herb] = 0
herb_count_dct[herb] += 1
# Write out the unique symptoms and herbs to file.
herb_out = open('./data/count_dictionaries/herb_count_dct_%s.txt' %
run_num, 'w')
for herb in herb_count_dct:
herb_out.write('%s\t%d\n' % (herb, herb_count_dct[herb]))
herb_out.close()
symptom_out = open('./data/count_dictionaries/symptom_count_dct_%s.txt' %
run_num, 'w')
for symptom in symptom_count_dct:
symptom_out.write('%s\t%d\n' % (symptom, symptom_count_dct[symptom]))
symptom_out.close()
return list(set(symptom_count_dct.keys()).union(herb_count_dct.keys()))
def write_code_list(code_list, run_num):
'''
Writes the code list out to file.
'''
out = open('./data/code_lists/code_list_%s.txt' % run_num, 'w')
for code in code_list:
out.write('%s\n' % code)
out.close()
def get_matrix_from_dct(patient_dct, code_list):
'''
Convert the patient dictionary to a document-term matrix.
'''
patient_matrix = []
for key in patient_dct:
visit_dct = patient_dct[key]
for date in sorted(visit_dct.keys()):
disease_list, symptom_list, herb_list = visit_dct[date]
curr_code_list = symptom_list + herb_list
# Create binary vectors for each patient visit.
curr_row = [1 if c in curr_code_list else 0 for c in code_list]
patient_matrix += [curr_row]
return np.array(patient_matrix)
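# Illustrative example (hypothetical codes): with code_list = ['cough', 'ginseng'],
# a visit whose symptom/herb set contains only 'cough' becomes the row [1, 0].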
def run_baseline_lda(patient_matrix, code_list, disease_set):
model = lda.LDA(n_topics=len(disease_set), n_iter=5000, random_state=1)
model.fit(patient_matrix)
topic_word = model.topic_word_
return topic_word
def main():
generate_folders()
for run_num in range(10):
# Fetch the training patient record dictionary.
patient_fname = './data/train_test/train_no_expansion_%s.txt' % run_num
patient_dct, disease_set = get_patient_dct(patient_fname)
# code_list is the vocabulary list.
code_list = get_symptom_and_herb_counts(patient_dct, run_num)
write_code_list(code_list, run_num)
# Convert the patient dictionary to a matrix for LDA.
patient_matrix = get_matrix_from_dct(patient_dct, code_list)
# Run LDA.
topic_word = run_baseline_lda(patient_matrix, code_list, disease_set)
np.savetxt('./results/lda_word_distributions/lda_word_distribution_%s'
'.txt' % run_num, topic_word)
if __name__ == '__main__':
start_time = time.time()
main()
print "---%f seconds---" % (time.time() - start_time)
|
Python
|
CL
|
7d98c238da3c912384ea8d47c2e789feb9978150831a1c616742cd2600520c50
|
#!/usr/bin/python
import numpy as np
import h5py
import os
from etrack.io import dataformats
from etrack.io.dataformats import ClassAttr
##############################################################################
# I/O #
##############################################################################
def write_object_to_hdf5(obj, h5group, name, pyobj_to_h5=None):
"""
Take the user-defined class instance, obj, and write it to HDF5
in HDF5 group h5group with name name.
Requires data_format to be an attribute of the object.
h5group is an existing h5py file/group to write to. The class attributes
are attributes in h5group, datasets in h5group, or subgroups of h5group.
pyobj_to_h5 = object dictionary:
{pyobjectA: h5objectA, pyobjectB: h5objectB, ...}
"""
if pyobj_to_h5 is None:
pyobj_to_h5 = {}
def check_input(obj, h5group):
"""
"""
if not hasattr(obj, 'data_format'):
raise InterfaceError(
'Need attribute data_format in order to write object to HDF5')
if not isinstance(h5group, h5py.Group):
raise InterfaceError(
'h5group should be a file or group from h5py')
try:
if not isinstance(h5group.file, h5py.File):
raise InterfaceError(
'h5group should be a file or group from h5py')
except RuntimeError:
raise InterfaceError(
'RuntimeError on file attribute - please confirm file ' +
'is not already closed')
if h5group.file.mode != 'r+':
raise InterfaceError(
'Cannot write object to h5file in read-only mode')
def check_attr(obj, attr):
"""
attribute may be None, list/tuple, dict, or "singular" object
"""
# get attribute
try:
data = getattr(obj, attr.name)
except AttributeError:
raise InterfaceError(
'Attribute {} does not exist in obj {}'.format(
attr.name, obj))
# check for disallowed None
if not attr.may_be_none and data is None:
raise InterfaceError(
'Found unexpected "None" value on {} attribute of {}'.format(
attr.name, obj))
if data is None:
return data
# remaining checks are not applicable
if isinstance(data, list) or isinstance(data, tuple):
# check if list/tuple is disallowed
if not attr.is_always_list and not attr.is_sometimes_list:
raise InterfaceError(
'Found unexpected list type on {} attribute of {}'.format(
attr.name, obj))
# item checks occur later, in main
elif isinstance(data, dict):
# check if dict is disallowed
if not attr.is_always_dict and not attr.is_sometimes_dict:
raise InterfaceError(
'Found unexpected dict type on {} attribute of {}'.format(
attr.name, obj))
# item checks occur later, in main
else:
if attr.is_always_list:
raise InterfaceError(
'Expected a list type on {} attribute of {}'.format(
attr.name, obj))
if attr.is_always_dict:
raise InterfaceError(
'Expected a dict type on {} attribute of {}'.format(
attr.name, obj))
            data = check_item(attr, data)
return data
def check_item(attr, item):
"""
item must not be a list/tuple or dict. it is a "singular" object.
Therefore, ignore the list and dict property flags of attr.
"""
# None checks
if not attr.may_be_none and item is None:
raise InterfaceError(
'Found unexpected "None" value on {} attribute of {}'.format(
attr.name, item))
if item is None:
return item
# remaining checks are not applicable, and no need to return item
# other types. exact type not required, but must be castable
if attr.is_user_object:
pass
# can't check type because user-defined dtype has to be just a
# classname string
# # must be strict about type
# if not isinstance(item, attr.dtype):
# raise InterfaceError(
# 'Expected user object of type ' + str(attr.dtype) +
# ', found a ' + str(type(item)))
# # other checks performed in a sub-call of write_object_to_hdf5
elif attr.dtype is np.ndarray:
try:
item = np.array(item)
except ValueError:
raise InterfaceError(
'Attribute data {} of type {} cannot be cast to {}'.format(
attr.name, type(item), attr.dtype))
else:
try:
item = attr.dtype(item)
except ValueError:
raise InterfaceError(
'Attribute data {} of type {} cannot be cast to {}'.format(
attr.name, type(item), attr.dtype))
return item
def write_item(attr, name, data, h5group, pyobj_to_h5):
"""
Write one item to the hdf5 file.
Inputs:
attr: the ClassAttr object describing this attribute.
name: name for the new hdf5 object
(either attr.name or the dict key)
data: data to put in the object (intelligently)
(either attr.data or the dict value)
h5group: parent location of the new hdf5 object
pyobj_to_h5: object dictionary to update
"""
if attr.is_user_object:
# check id
if data in pyobj_to_h5:
# don't write the actual data; make a hard link
h5group[name] = pyobj_to_h5[data]
else:
# recurse
write_object_to_hdf5(data, h5group, name,
pyobj_to_h5=pyobj_to_h5)
elif attr.make_dset:
h5group.create_dataset(
name, shape=np.shape(data), data=data)
else:
h5group.attrs.create(
name, data, shape=np.shape(data))
# ~~~ begin main ~~~
check_input(obj, h5group)
if obj in pyobj_to_h5:
h5group[name] = pyobj_to_h5[obj]
return None
else:
if name in h5group:
del(h5group[name])
this_group = h5group.create_group(name)
this_group.attrs.create('obj_type', data=obj.class_name)
pyobj_to_h5[obj] = this_group
for attr in obj.data_format:
data = check_attr(obj, attr)
# if None: skip
if data is None:
continue
is_list = isinstance(data, list) or isinstance(data, tuple)
is_dict = isinstance(data, dict)
if is_list:
subgroup = this_group.create_group(attr.name)
subgroup.attrs.create('obj_type', data='list')
for i, item in enumerate(data):
item = check_item(attr, item)
write_item(attr, str(i), item, subgroup, pyobj_to_h5)
elif is_dict:
subgroup = this_group.create_group(attr.name)
subgroup.attrs.create('obj_type', data='dict')
for key, item in data.items():
item = check_item(attr, item)
write_item(attr, key, item, subgroup, pyobj_to_h5)
else:
write_item(attr, attr.name, data, this_group, pyobj_to_h5)
return None
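# Minimal write sketch (SomeClass is hypothetical; it must define `class_name`
# and a `data_format` tuple of ClassAttr entries, as required above):
#   fmt = (ClassAttr('count', int),
#          ClassAttr('values', np.ndarray, make_dset=True))
#   obj = SomeClass(count=3, values=np.arange(5))
#   with h5py.File('out.h5', 'a') as f:   # 'a' and 'w' both report mode 'r+'
#       write_object_to_hdf5(obj, f, 'my_obj')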
def write_objects_to_hdf5(h5group_or_filename, pyobj_to_h5=None, **kwargs):
"""
Write a list of objects to file.
h5group_or_filename, as implied, can be either a h5file/h5group object,
or a filename for an h5file to create.
If the filename is missing the .h5 or .hdf5 extension, .h5 will be added.
And the h5file will be closed if filename is supplied.
kwargs in the form:
h5_object_name=py_object
"""
if pyobj_to_h5 is None:
pyobj_to_h5 = {}
if (isinstance(h5group_or_filename, str) or
isinstance(h5group_or_filename, unicode)):
# filename supplied: create h5 file and close when finished
filename = h5group_or_filename
# check extension
if filename[-5:] != '.hdf5' and filename[-3:] != '.h5':
filename += '.h5'
        with h5py.File(filename, 'a') as h5group:
for key, val in kwargs.iteritems():
write_object_to_hdf5(val, h5group, key,
pyobj_to_h5=pyobj_to_h5)
return filename
elif isinstance(h5group_or_filename, h5py.Group):
# h5group supplied: just write the objects
h5group = h5group_or_filename
for key, val in kwargs.iteritems():
write_object_to_hdf5(val, h5group, key,
pyobj_to_h5=pyobj_to_h5)
return h5group.file.filename
else:
raise InterfaceError(
'write_objects_to_hdf5 needs either an h5py.Group ' +
'or a string filename')
return None
def write_object_list_to_hdf5(h5group_or_filename, obj_list, prefix=None):
"""
Write a list of objects into an HDF5 file. Their group names are numerical.
pyobj_to_h5 dict is always initialized fresh.
prefix: text to put before the numerical name.
The group name is 0-padded as needed.
"""
pyobj_to_h5 = {}
if prefix is None:
prefix = ''
maxnumlen = len(str(len(obj_list)))
fmtstring = prefix + '{:0' + str(maxnumlen) + 'd}'
if (isinstance(h5group_or_filename, str) or
isinstance(h5group_or_filename, unicode)):
# filename supplied: create h5 file and close when finished
filename = h5group_or_filename
# check extension
if filename[-5:] != '.hdf5' and filename[-3:] != '.h5':
filename += '.h5'
        with h5py.File(filename, 'a') as h5group:
for i, obj in enumerate(obj_list):
groupname = fmtstring.format(i)
write_object_to_hdf5(obj, h5group, groupname,
pyobj_to_h5=pyobj_to_h5)
return filename
elif isinstance(h5group_or_filename, h5py.Group):
# h5group supplied: just write the objects
h5group = h5group_or_filename
for i, obj in enumerate(obj_list):
groupname = fmtstring.format(i)
write_object_to_hdf5(obj, h5group, groupname,
pyobj_to_h5=pyobj_to_h5)
return h5group.file.filename
else:
raise InterfaceError(
'write_objects_to_hdf5 needs either an h5py.Group ' +
'or a string filename')
return None
def read_object_from_hdf5(h5group, h5_to_pydict=None, ext_data_format=None,
verbosity=0):
"""
Take an HDF5 group which represents a class instance, parse and return it
as a dictionary of attribute values.
The class definition should exist in dataformats.py.
h5_to_pydict = {h5objectA: pyobjectA, h5objectB: pyobjectB, ...}
"""
if h5_to_pydict is None:
h5_to_pydict = {}
def check_input(h5group, data_format):
"""
Input HDF5 group should have 'obj_type' attribute which matches
a class we know.
"""
if ext_data_format is not None:
# for testing: don't use dataformats.get_format()
return ext_data_format
else:
if 'obj_type' not in h5group.attrs:
if h5group == h5group.file:
raise InterfaceError(
'Looks like you supplied the HDF5 file object ' +
'instead of the HDF5 group representing the object...')
else:
raise InterfaceError(
'HDF5 object should have an attribute, obj_type')
obj_type = h5group.attrs['obj_type']
data_format = dataformats.get_format(obj_type)
return data_format
def check_attr(h5group, attr):
"""
Check that the data and attributes in h5group are compatible with the
data description in attr.
Return the attribute type: 'none', 'list', 'dict', or 'single'
Type 'single' includes basic data types and user objects.
"""
if attr.name in h5group:
# it is either an h5group or a dataset.
if 'obj_type' in h5group[attr.name].attrs:
# if it has the obj_type attribute,
# it is either a list or a dict or a user-defined object.
obj_type = h5group[attr.name].attrs['obj_type']
if obj_type == 'list':
if not attr.is_always_list and not attr.is_sometimes_list:
raise InterfaceError(
'Unexpected list in HDF5 file for attribute ' +
'{}'.format(attr.name))
hdf5_type = 'list'
elif obj_type == 'dict':
if not attr.is_always_dict and not attr.is_sometimes_dict:
raise InterfaceError(
'Unexpected dict in HDF5 file for attribute ' +
'{}'.format(attr.name))
hdf5_type = 'dict'
else:
# user object
if attr.is_always_list:
raise InterfaceError(
'Expected a list in HDF5 file for attribute ' +
'{}'.format(attr.name))
elif attr.is_always_dict:
raise InterfaceError(
'Expected a dict in HDF5 file for attribute ' +
'{}'.format(attr.name))
elif not attr.is_user_object:
raise InterfaceError(
'Unexpected user object in HDF5 file for ' +
'attribute {}'.format(attr.name))
hdf5_type = 'single'
else:
# not marked with obj_type attribute. A dataset.
if not attr.make_dset:
raise InterfaceError(
'Unexpected dataset in HDF5 file for attribute ' +
'{}'.format(attr.name))
hdf5_type = 'single'
elif attr.name in h5group.attrs:
# it is an h5 attribute.
if attr.is_always_list:
raise InterfaceError(
'Expected a list in HDF5 file for attribute ' +
'{}'.format(attr.name))
elif attr.is_always_dict:
raise InterfaceError(
'Expected a dict in HDF5 file for attribute ' +
'{}'.format(attr.name))
elif attr.make_dset:
raise InterfaceError(
'Expected a dataset in HDF5 file for attribute ' +
'{}; found HDF5 attribute instead'.format(attr.name))
elif attr.is_user_object:
raise InterfaceError(
'Expected a user object in HDF5 file for attribute ' +
'{}; found HDF5 attribute instead'.format(attr.name))
hdf5_type = 'single'
else:
# not a h5 group, dataset, or attribute. It isn't there.
if not attr.may_be_none:
raise InterfaceError(
'Failed to find required attribute ' +
'{} in HDF5 file'.format(attr.name))
hdf5_type = 'none'
return hdf5_type
def read_item(attr, h5item, h5_to_pydict=None):
if h5_to_pydict is None:
h5_to_pydict = {}
vprint(' Reading item {}'.format(h5item))
if (not isinstance(h5item, np.ndarray)) and h5item in h5_to_pydict:
vprint(' Item {} in h5_to_pydict! Skipping'.format(h5item))
return h5_to_pydict[h5item]
if attr.make_dset:
# hdf5 dataset
# only the np.ndarray should be non-singular
if attr.dtype != np.ndarray and h5item.shape != ():
raise InterfaceError('Found too many elements for attribute ' +
'{}'.format(attr.name))
if attr.dtype == int or attr.dtype == float or attr.dtype == bool:
output = np.zeros(())
h5item.read_direct(output)
output = attr.dtype(output)
elif attr.dtype == np.ndarray:
output = np.zeros(h5item.shape)
if len(output):
h5item.read_direct(output)
# else, nothing to read, and it would raise a ZeroDivisionError
elif attr.dtype == str:
# blech
raise InterfaceError("Don't store strings in datasets!" +
'{}'.format(attr.name))
else:
raise InterfaceError('Unknown dtype in dataset for attribute' +
' {}'.format(attr.name))
elif attr.is_user_object:
# user object: recurse
output = read_object_from_hdf5(
h5item, h5_to_pydict=h5_to_pydict, verbosity=verbosity)
elif attr.dtype is np.ndarray:
output = np.array(h5item)
else:
# hdf5 attribute
output = attr.dtype(h5item)
return output
def vprint(stringdata):
"""
Verbose print
"""
if verbosity > 0:
print stringdata
#
# ~~~ begin main ~~~
#
data_format = check_input(h5group, ext_data_format)
vprint(' ')
vprint('Beginning read of {}. h5_to_pydict includes:'.format(str(h5group)))
for key in h5_to_pydict.keys():
vprint(' {}'.format(str(key)))
if h5group in h5_to_pydict:
# the target of the hard link has already been created
# this works because (h5groupA == h5groupB) iff they are hardlinks
# pointing to the same object in the hdf5 file.
# (specifically, they are equal but not identical,
# i.e. (h5groupA is h5groupB) is false
# for hard links pointing to the same object in the hdf5 file)
vprint(' Found {} in h5_to_pydict! Skipping'.format(str(h5group)))
output = h5_to_pydict[h5group]
return output
else:
vprint(' {} not in h5_to_pydict. adding and processing...'.format(
str(h5group)))
        # start output as an empty dictionary
output = {}
# add this object to the h5_to_pydict
h5_to_pydict[h5group] = output
for attr in data_format:
vprint(' Attribute {}'.format(attr.name))
hdf5_type = check_attr(h5group, attr)
if hdf5_type == 'none':
output[attr.name] = None
continue
elif hdf5_type == 'list':
i = 0
output[attr.name] = []
h5list = h5group[attr.name]
if attr.make_dset or attr.is_user_object:
# list elements are stored as hdf5 datasets or hdf5 groups
while str(i) in h5list:
h5item = h5list[str(i)]
output[attr.name].append(
read_item(attr, h5item, h5_to_pydict=h5_to_pydict))
i += 1
else:
# list elements are stored as hdf5 attributes
while str(i) in h5list.attrs:
h5item = h5list.attrs[str(i)]
output[attr.name].append(
read_item(attr, h5item, h5_to_pydict=h5_to_pydict))
i += 1
elif hdf5_type == 'dict':
output[attr.name] = {}
# read datasets, groups, and attributes
for key, h5item in h5group[attr.name].iteritems():
if key == 'obj_type':
continue
output[attr.name][key] = read_item(
attr, h5item, h5_to_pydict=h5_to_pydict)
for key, h5item in h5group[attr.name].attrs.iteritems():
if key == 'obj_type':
continue
output[attr.name][key] = read_item(
attr, h5item, h5_to_pydict=h5_to_pydict)
elif hdf5_type == 'single':
if attr.make_dset or attr.is_user_object:
h5item = h5group[attr.name]
else:
h5item = h5group.attrs[attr.name]
output[attr.name] = read_item(
attr, h5item, h5_to_pydict=h5_to_pydict)
else:
raise Exception(
'Unexpected hdf5_type on ' +
'{}, where did this come from?'.format(attr.name))
vprint(' ')
return output
def read_object_list_from_hdf5(h5group_or_filename, constructor,
*args, **kwargs):
"""
Create a list of objects, from an HDF5 file or group.
h5group_or_filename: either a h5py Group/File object, or a filename,
containing h5groups '000', '001', or so.
constructor: class method for constructing a single object.
*args, **kwargs: anything else to pass to constructor
except prefix, which indicates the text prefix to the index number
"""
def get_fmtstring(h5group, prefix):
"""
Figure out whether the h5 items are '0', '00', '000', etc.
"""
if prefix is None:
prefix = ''
for n in xrange(10):
if (prefix + str('0' * n)) in h5group.keys():
fmtstring = prefix + '{:0' + str(n) + 'd}'
return fmtstring
else:
raise Exception('Item #0 not found')
def construct_list(h5group, fmtstring, constructor, *args, **kwargs):
"""
Actually build the list of objects.
"""
i = 0
obj_list = []
while fmtstring.format(i) in h5group.keys():
key = fmtstring.format(i)
new_obj = constructor(h5group[key], *args, **kwargs)
obj_list.append(new_obj)
i += 1
return obj_list
# ~~~ begin main ~~~
if 'prefix' in kwargs:
prefix = kwargs['prefix']
del(kwargs['prefix'])
else:
prefix = ''
if (isinstance(h5group_or_filename, str)
or isinstance(h5group_or_filename, unicode)):
with h5py.File(h5group_or_filename, 'r') as h5group:
fmtstring = get_fmtstring(h5group, prefix)
obj_list = construct_list(
h5group, fmtstring, constructor, *args, **kwargs)
elif isinstance(h5group_or_filename, h5py.Group):
h5group = h5group_or_filename
fmtstring = get_fmtstring(h5group, prefix)
obj_list = construct_list(
h5group, fmtstring, constructor, *args, **kwargs)
else:
raise InterfaceError(
'write_objects_to_hdf5 needs either an h5py.Group ' +
'or a string filename')
return obj_list
class InterfaceError(Exception):
pass
##############################################################################
# Testing #
##############################################################################
def main():
"""
Run tests.
"""
filename = generate_random_filename(ext='h5')
try:
test_IO_data_types(filename)
test_IO_obj_dict(filename)
test_IO_overwrite(filename)
test_write_objects_to_hdf5()
finally:
# if any exceptions are raised in the test, the file will not have
# been deleted by os.remove(). So try it here.
# (too lazy to actually check whether it's still there or not,
        # so try/except)
try:
os.remove(filename)
except OSError:
pass
def generate_random_filename(ext='h5'):
"""
Generate a random 8-character filename, with extension ext.
"""
if ext and ext[0] == '.':
ext = ext[1:]
filebase = ''.join(
chr(i) for i in np.random.randint(97, 122, size=(8,)))
filename = '.'.join([filebase, ext])
return filename
class TestIO(object):
"""
For testing data_format handling in IO.
"""
class_name = 'TestIO'
def __init__(self, data_format, **kwargs):
"""
Initialize a TestIO object with a user-defined data_format and
list of keyword arguments.
"""
self.data_format = data_format
for key, val in kwargs.iteritems():
setattr(self, key, val)
def check_alg_results_IO(read_dict, orig_obj, uncertainty_flag=False):
"""
Check that the output from read_object_from_hdf5 (read_dict) has the same
data as the original object (orig_obj) for an AlgorithmResults object.
uncertainty_flag: also check alpha_unc and beta_unc
"""
assert np.all(read_dict['alpha_meas_deg'] == orig_obj.alpha_meas_deg)
assert np.all(read_dict['alpha_true_deg'] == orig_obj.alpha_true_deg)
assert np.all(read_dict['beta_meas_deg'] == orig_obj.beta_meas_deg)
assert np.all(read_dict['beta_true_deg'] == orig_obj.beta_true_deg)
assert np.all(read_dict['depth_um'] == orig_obj.depth_um)
assert np.all(read_dict['energy_tot_kev'] == orig_obj.energy_tot_kev)
assert np.all(read_dict['energy_dep_kev'] == orig_obj.energy_dep_kev)
assert np.all(read_dict['is_contained'] == orig_obj.is_contained)
assert read_dict['has_alpha'] == orig_obj.has_alpha
assert read_dict['has_beta'] == orig_obj.has_beta
if uncertainty_flag:
aunc1 = read_dict['alpha_unc']
aunc2 = orig_obj.alpha_unc
assert aunc1['angle_type'] == aunc2.angle_type
assert np.all(aunc1['delta'] == aunc2.delta)
assert aunc1['n_values'] == aunc2.n_values
assert aunc1['resolution'] == aunc2.resolution
assert np.all(aunc1['nhist'] == aunc2.nhist)
assert np.all(aunc1['xhist'] == aunc2.xhist)
bunc1 = read_dict['beta_unc']
bunc2 = orig_obj.beta_unc
assert bunc1['angle_type'] == bunc2.angle_type
assert np.all(bunc1['delta'] == bunc2.delta)
assert bunc1['n_values'] == bunc2.n_values
for i, unc2 in enumerate(read_dict['uncertainty_list']):
unc = orig_obj.uncertainty_list[i]
assert unc2['angle_type'] == unc.angle_type
assert np.all(unc2['delta'] == unc.delta)
assert unc2['n_values'] == unc.n_values
if unc2['angle_type'] == 'alpha':
assert unc2['resolution'] == unc.resolution
assert np.all(unc2['nhist'] == unc.nhist)
assert np.all(unc2['xhist'] == unc.xhist)
for metricname, metric in unc2['metrics'].iteritems():
assert metric['axis_max'] == unc.metrics[metricname].axis_max
assert metric['axis_min'] == unc.metrics[metricname].axis_min
assert metric['fit_name'] == unc.metrics[metricname].fit_name
assert metric['name'] == unc.metrics[metricname].name
assert metric['units'] == unc.metrics[metricname].units
assert metric['value'] == unc.metrics[metricname].value
unc = orig_obj.alpha_unc
iterator = read_dict['alpha_unc']['metrics'].iteritems()
for metricname, metric in iterator:
assert metric['axis_max'] == unc.metrics[metricname].axis_max
assert metric['axis_min'] == unc.metrics[metricname].axis_min
assert metric['fit_name'] == unc.metrics[metricname].fit_name
assert metric['name'] == unc.metrics[metricname].name
assert metric['units'] == unc.metrics[metricname].units
assert metric['value'] == unc.metrics[metricname].value
unc = orig_obj.beta_unc
iterator = read_dict['beta_unc']['metrics'].iteritems()
for metricname, metric in iterator:
assert metric['axis_max'] == unc.metrics[metricname].axis_max
assert metric['axis_min'] == unc.metrics[metricname].axis_min
assert metric['fit_name'] == unc.metrics[metricname].fit_name
assert metric['name'] == unc.metrics[metricname].name
assert metric['units'] == unc.metrics[metricname].units
assert metric['value'] == unc.metrics[metricname].value
def test_IO_data_types(filename):
def test_IO_singular(filename):
"""
test all 'singular' data types
write and read
"""
# TestIO objects and ClassAttr
data_format = (ClassAttr('int1', int),
ClassAttr('int2', int),
ClassAttr('str1', str),
ClassAttr('float1', float),
ClassAttr('bool1', bool),
ClassAttr('array1', np.ndarray))
array_data = np.array([1.0, 2.1, 3.2])
t = TestIO(data_format, int1=34, int2=-6, str1='asdf', float1=3.141,
bool1=False, array1=array_data)
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(t, h5file, 't')
with h5py.File(filename, 'r') as h5file:
t2 = read_object_from_hdf5(
h5file['t'], ext_data_format=data_format)
assert t2['int1'] == 34
assert t2['int2'] == -6
assert t2['str1'] == 'asdf'
assert t2['float1'] == 3.141
assert t2['bool1'] is False
assert np.all(t2['array1'] == array_data)
os.remove(filename)
def test_IO_lists(filename):
"""
lists and tuples
is_always_list, is_sometimes_list
"""
# test list (is_always_list)
data_format = (ClassAttr('float1', float),
ClassAttr('list1', int, is_always_list=True),
ClassAttr('str1', str))
listdata = [1, 3, 5, 7, 9]
t = TestIO(data_format, float1=-26.3, str1='foo', list1=listdata)
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(t, h5file, 't')
with h5py.File(filename, 'r') as h5file:
t2 = read_object_from_hdf5(
h5file['t'], ext_data_format=data_format)
assert t2['float1'] == -26.3
assert np.all(t2['list1'] == listdata)
assert t2['str1'] == 'foo'
os.remove(filename)
# test tuple (is_sometimes_list)
data_format = (ClassAttr('float1', float),
ClassAttr('list1', float, is_sometimes_list=True),
ClassAttr('str1', str))
listdata = (1.0, 3.3, 5.1)
t = TestIO(data_format, float1=-26.3, str1='foo', list1=listdata)
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(t, h5file, 't')
with h5py.File(filename, 'r') as h5file:
t2 = read_object_from_hdf5(
h5file['t'], ext_data_format=data_format)
assert t2['float1'] == -26.3
assert np.all(t2['list1'] == list(listdata))
assert t2['str1'] == 'foo'
os.remove(filename)
# test is_sometimes_list without a list
data_format = (ClassAttr('float1', float),
ClassAttr('maybelist1', int, is_sometimes_list=True),
ClassAttr('str1', str))
t = TestIO(data_format, float1=-26.3, str1='foo', maybelist1=3)
        with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(t, h5file, 't')
with h5py.File(filename, 'r') as h5file:
t2 = read_object_from_hdf5(
h5file['t'], ext_data_format=data_format)
assert t2['float1'] == -26.3
assert np.all(t2['maybelist1'] == 3)
assert t2['str1'] == 'foo'
os.remove(filename)
def test_IO_dicts(filename):
"""
dicts
is_always_dict, is_sometimes_dict
"""
data_format = (ClassAttr('float1', float),
ClassAttr('dict1', str, is_always_dict=True),
ClassAttr('str1', str))
dictdata = {'foo': 'foovalue', 'bar': 'barvalue', 'asdf': 'qwerty'}
t = TestIO(data_format, float1=-26.3, str1='foo',
dict1=dictdata)
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(t, h5file, 't')
with h5py.File(filename, 'r') as h5file:
t2 = read_object_from_hdf5(
h5file['t'], ext_data_format=data_format)
assert t2['float1'] == -26.3
assert t2['str1'] == 'foo'
assert t2['dict1'] == dictdata
os.remove(filename)
# test is_sometimes_dict without a dict
data_format = (ClassAttr('float1', float),
ClassAttr('maybedict1', float, is_sometimes_dict=True),
ClassAttr('str1', str))
t = TestIO(data_format, float1=-26.3, str1='foo', maybedict1=3.5)
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(t, h5file, 't')
with h5py.File(filename, 'r') as h5file:
t2 = read_object_from_hdf5(
h5file['t'], ext_data_format=data_format)
assert t2['float1'] == -26.3
assert t2['str1'] == 'foo'
assert t2['maybedict1'] == 3.5
os.remove(filename)
def test_IO_dsets_none(filename):
"""
make_dset
may_be_none
"""
# test make_dset
data_format = (ClassAttr('float1', float),
ClassAttr('array1', np.ndarray, make_dset=True),
ClassAttr('str1', str))
arraydata = np.array(range(150))
t = TestIO(data_format, float1=-26.3, str1='foo',
array1=arraydata)
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(t, h5file, 't')
with h5py.File(filename, 'r') as h5file:
t2 = read_object_from_hdf5(
h5file['t'], ext_data_format=data_format)
assert t2['float1'] == -26.3
assert t2['str1'] == 'foo'
assert np.all(t2['array1'] == arraydata)
os.remove(filename)
# test may_be_none
data_format = (ClassAttr('float1', float),
ClassAttr('str1', str, may_be_none=True))
t = TestIO(data_format, float1=-26.3, str1=None)
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(t, h5file, 't')
with h5py.File(filename, 'r') as h5file:
t2 = read_object_from_hdf5(
h5file['t'], ext_data_format=data_format)
assert t2['float1'] == -26.3
assert t2['str1'] is None
os.remove(filename)
def test_IO_user_objects(filename):
"""
single user object
multi-level user objects
"""
# don't import at top of file! circular import with evaluation.py
from etrack.reconstruction import evaluation
# Real Classes:
# single user-defined object
alg_results = evaluation.generate_random_alg_results(length=10000)
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(
alg_results, h5file, 'alg_results')
with h5py.File(filename, 'r') as h5file:
ar2 = read_object_from_hdf5(h5file['alg_results'])
check_alg_results_IO(ar2, alg_results, uncertainty_flag=False)
os.remove(filename)
# multi-level object
alg_results.add_default_uncertainties()
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(
alg_results, h5file, 'alg_results')
with h5py.File(filename, 'r') as h5file:
ar2 = read_object_from_hdf5(h5file['alg_results'])
check_alg_results_IO(ar2, alg_results, uncertainty_flag=True)
os.remove(filename)
# test_IO_data_types() main
test_IO_singular(filename)
test_IO_lists(filename)
test_IO_dicts(filename)
test_IO_dsets_none(filename)
test_IO_user_objects(filename)
def test_IO_obj_dict(filename):
"""
test obj_dict hardlink capability
"""
from etrack.reconstruction import evaluation
# first, check a single object which is listed in obj_dict
alg_results = evaluation.generate_random_alg_results(length=10000)
alg_results.parent = [alg_results]
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(
alg_results, h5file, 'alg_results')
with h5py.File(filename, 'r') as h5file:
ar2 = read_object_from_hdf5(
h5file['alg_results'])
assert ar2['parent'][0] is ar2
os.remove(filename)
# check alpha_unc and beta_unc
alg_results.add_default_uncertainties()
alg_results.parent = [alg_results]
with h5py.File(filename, 'w') as h5file:
write_object_to_hdf5(
alg_results, h5file, 'alg_results')
with h5py.File(filename, 'r') as h5file:
ar2 = read_object_from_hdf5(
h5file['alg_results'])
assert ar2['parent'][0] is ar2
assert ar2['alpha_unc'] is ar2['uncertainty_list'][0]
assert ar2['beta_unc'] is ar2['uncertainty_list'][1]
os.remove(filename)
# check obj_dict with multiple objects written to the same file
# (has to be written in the same file session)
ar1 = evaluation.generate_random_alg_results(length=10000)
ar1.add_default_uncertainties()
ar2 = evaluation.generate_random_alg_results(length=1000)
ar2.add_default_uncertainties()
pyobj_to_h5 = {}
with h5py.File(filename, 'a') as h5file:
write_object_to_hdf5(ar1, h5file, 'ar1', pyobj_to_h5=pyobj_to_h5)
write_object_to_hdf5(ar2, h5file, 'ar2', pyobj_to_h5=pyobj_to_h5)
# should be hardlinked
write_object_to_hdf5(ar1, h5file, 'ar3', pyobj_to_h5=pyobj_to_h5)
h5_to_pydict = {}
with h5py.File(filename, 'r') as h5file:
ar1r = read_object_from_hdf5(h5file['ar1'], h5_to_pydict=h5_to_pydict)
ar2r = read_object_from_hdf5(h5file['ar2'], h5_to_pydict=h5_to_pydict)
ar3r = read_object_from_hdf5(h5file['ar3'], h5_to_pydict=h5_to_pydict)
check_alg_results_IO(ar1r, ar1, uncertainty_flag=True)
check_alg_results_IO(ar2r, ar2, uncertainty_flag=True)
check_alg_results_IO(ar3r, ar1, uncertainty_flag=True)
# this is the hardlink test:
assert ar1r is ar3r
os.remove(filename)
def test_IO_overwrite(filename):
"""
test the ability to overwrite objects in an existing HDF5 file
"""
from etrack.reconstruction import evaluation
# simple overwrite
alg_results = evaluation.generate_random_alg_results(length=10000)
alg_results.add_default_uncertainties()
with h5py.File(filename, 'a') as h5file:
write_object_to_hdf5(
alg_results, h5file, 'alg_results')
with h5py.File(filename, 'a') as h5file:
write_object_to_hdf5(
alg_results, h5file, 'alg_results')
with h5py.File(filename, 'r') as h5file:
ar2 = read_object_from_hdf5(h5file['alg_results'])
check_alg_results_IO(ar2, alg_results, uncertainty_flag=True)
os.remove(filename)
# writing two objects to the same file
ar1 = evaluation.generate_random_alg_results(length=10000)
ar1.add_default_uncertainties()
ar2 = evaluation.generate_random_alg_results(length=1000)
ar2.add_default_uncertainties()
with h5py.File(filename, 'a') as h5file:
write_object_to_hdf5(ar1, h5file, 'ar1')
write_object_to_hdf5(ar2, h5file, 'ar2')
with h5py.File(filename, 'r') as h5file:
ar1r = read_object_from_hdf5(h5file['ar1'])
ar2r = read_object_from_hdf5(h5file['ar2'])
check_alg_results_IO(ar1r, ar1, uncertainty_flag=True)
check_alg_results_IO(ar2r, ar2, uncertainty_flag=True)
# overwriting just one of the two objects
ar3 = evaluation.generate_random_alg_results(length=1000)
ar3.add_default_uncertainties()
with h5py.File(filename, 'a') as h5file:
write_object_to_hdf5(ar3, h5file, 'ar1')
with h5py.File(filename, 'r') as h5file:
ar1r = read_object_from_hdf5(h5file['ar1'])
ar2r = read_object_from_hdf5(h5file['ar2'])
check_alg_results_IO(ar1r, ar3, uncertainty_flag=True)
check_alg_results_IO(ar2r, ar2, uncertainty_flag=True)
os.remove(filename)
def test_write_objects_to_hdf5():
"""
test the multiple-object form of writing
"""
from etrack.reconstruction import evaluation
filename = generate_random_filename(ext='')
filename_h5 = filename + '.h5'
filename_hdf5 = filename + '.hdf5'
# h5file provided
# single object
ar = evaluation.generate_random_alg_results(length=1000)
ar.add_default_uncertainties()
with h5py.File(filename_h5, 'a') as h5file:
write_objects_to_hdf5(h5file, ar=ar)
with h5py.File(filename_h5, 'r') as h5file:
ar_read = read_object_from_hdf5(h5file['ar'])
check_alg_results_IO(ar_read, ar, uncertainty_flag=True)
os.remove(filename_h5)
# h5file provided
# multiple objects
ar1 = evaluation.generate_random_alg_results(length=1000)
ar1.add_default_uncertainties()
ar2 = evaluation.generate_random_alg_results(length=2000)
ar2.add_default_uncertainties()
ar3 = evaluation.generate_random_alg_results(length=3000)
ar3.add_default_uncertainties()
with h5py.File(filename_h5, 'a') as h5file:
filename_written = write_objects_to_hdf5(
h5file,
ar1=ar1, ar2=ar2, ar3=ar3, aunc=ar1.alpha_unc)
assert filename_written == filename_h5
h5_to_pydict = {}
with h5py.File(filename_h5, 'r') as h5file:
ar1_read = read_object_from_hdf5(
h5file['ar1'], h5_to_pydict=h5_to_pydict)
ar2_read = read_object_from_hdf5(
h5file['ar2'], h5_to_pydict=h5_to_pydict)
ar3_read = read_object_from_hdf5(
h5file['ar3'], h5_to_pydict=h5_to_pydict)
aunc = read_object_from_hdf5(h5file['aunc'], h5_to_pydict=h5_to_pydict)
check_alg_results_IO(ar1_read, ar1, uncertainty_flag=True)
check_alg_results_IO(ar2_read, ar2, uncertainty_flag=True)
check_alg_results_IO(ar3_read, ar3, uncertainty_flag=True)
# check hard link across multiple write calls (within a file session)
assert aunc is ar1_read['alpha_unc']
os.remove(filename_h5)
# filename provided, including extension (single object)
filename_written = write_objects_to_hdf5(filename_h5, ar=ar)
assert filename_written == filename_h5
with h5py.File(filename_h5, 'r') as h5file:
ar_read = read_object_from_hdf5(h5file['ar'])
check_alg_results_IO(ar_read, ar, uncertainty_flag=True)
os.remove(filename_h5)
# filename provided as *.hdf5
filename_written = write_objects_to_hdf5(filename_hdf5, ar=ar)
assert filename_written == filename_hdf5
with h5py.File(filename_hdf5, 'r') as h5file:
ar_read = read_object_from_hdf5(h5file['ar'])
check_alg_results_IO(ar_read, ar, uncertainty_flag=True)
os.remove(filename_hdf5)
# filename provided without extension. check that extension is added
filename_written = write_objects_to_hdf5(filename, ar=ar)
assert filename_written == filename_h5
with h5py.File(filename_h5, 'r') as h5file:
ar_read = read_object_from_hdf5(h5file['ar'])
check_alg_results_IO(ar_read, ar, uncertainty_flag=True)
os.remove(filename_h5)
if __name__ == '__main__':
main()
|
Python
|
CL
|
c2c4f4a4727c6654256fed7b95c97a6bd5382f424d11aea4d1ece79a864e2c56
|
from django.db.models import Model, ManyToManyField, CharField, BooleanField
from django.contrib.auth.models import User
from capstoneproject.models.querysets.word_queryset import WordQuerySet
from capstoneproject.models.models.word_feature import WordFeature
class Word(Model):
"""A class representing the system's table of offensive Words."""
default = BooleanField(default=False)
word_features = ManyToManyField(WordFeature, related_name='words')
name = CharField(unique=True, max_length=30)
words = WordQuerySet.as_manager()
def __str__(self):
"""Overwrites the __str__ function to provide a string containing the
word's name.
Return:
str: The word's name.
"""
return 'Word: {}'.format(self.name)
def __repr__(self):
"""
Overwrites the __repr__ function to provide the name and features of
the word.
:return: A string containing the word's name and word features
"""
return 'word: {} features: {}'.format(self.name, self.word_features)
def _dict(self):
"""
Provides a dictionary mapping the word name to the word's features.
:return: A dictionary containing the word's name and word's features.
"""
return {self.name: self.get_word_features()}
def isDefault(self):
"""
Determines if this model instance is a default.
:return: True if this model instance is a default.
"""
return self.default
def isCustom(self):
"""
Determines if this model instance is User created.
:return: True if this model instance is User created.
"""
return not self.default
def isRelated(self):
"""
Determines if any relatives rely on this model instance.
:return: True if relatives rely on this model instance.
"""
        return self.user_storage.exists() \
            or self.word_features.exists()
def isOrphaned(self):
"""
Determines if no relatives rely on this model instance.
:return: True if no relatives rely on this model instance.
"""
        return not self.user_storage.exists() \
            and not self.word_features.exists()
def delete_relatives(self):
"""
Deletes this model instance's related model instances.
:return:
"""
word_features = list(self.word_features.all())
for word_feature in word_features:
if word_feature.isOrphaned() and word_feature.isCustom():
word_feature.delete()
def get_word_features(self):
"""
        Provides the word's features.
        :return: A list of dictionaries, one per word feature.
"""
word_features_list = []
for word_feature in self.word_features.all():
word_features_list.append(word_feature._dict())
return word_features_list
def get_categories(self):
"""
Provides a list of the offensive categories of which the word belongs.
:return: A list of offensive categories that the word is classified as.
"""
cats = list()
for word_feature in self.word_features.all():
cats.append(word_feature.category.name)
return cats
def delete(self, *args, **kwargs):
"""
        Deletes this model instance along with any relatives that
        are only related to it.
        :param args: Possible positional arguments for the default delete method.
        :param kwargs: Possible keyword arguments for the default delete method.
:return:
"""
self.delete_relatives()
super().delete(*args, **kwargs)
class Meta:
"""Settings for the Word model."""
default_manager_name = 'words'
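# A minimal usage sketch (an assumption, not part of the original module): it
# presumes a configured Django project with this app's migrations applied.
# 'words' is the default manager declared above via WordQuerySet.as_manager().
#
#   word = Word.words.create(name='example')
#   word.word_features.add(feature)  # 'feature' is a hypothetical WordFeature
#   print(Word.words.get(name='example').get_categories())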
|
Python
|
CL
|
5814bca2443a4c763fe9163376698fabad7f8f8fd60707504e4d257a75034004
|
#!/usr/bin/python3
# encoding=utf-8
# Copyright © 2016 Intel Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Flask webpage for performance data."""
import itertools
# pylint: disable=import-error
import flask
from flask_mako import MakoTemplates, render_template
# pylint: enable=import-error
APP = flask.Flask(__name__)
_ = MakoTemplates(APP)
# pylint: disable=bad-whitespace
_BENCHMARKS = [
('OglBatch0', 'micro-benchmark' ),
('OglBatch1', 'micro-benchmark' ),
('OglBatch2', 'micro-benchmark' ),
('OglBatch3', 'micro-benchmark' ),
('OglBatch4', 'micro-benchmark' ),
('OglBatch5', 'micro-benchmark' ),
('OglBatch6', 'micro-benchmark' ),
('OglBatch7', 'micro-benchmark' ),
('OglCSCloth', 'micro-benchmark' ),
('OglCSDof', 'micro-benchmark' ),
('OglDeferred', 'micro-benchmark' ),
('OglDeferredAA', 'micro-benchmark' ),
('OglDrvRes', 'micro-benchmark' ),
('OglDrvShComp', 'micro-benchmark' ),
('OglDrvState', 'micro-benchmark' ),
('OglFillPixel', 'micro-benchmark' ),
('OglFillTexMulti', 'micro-benchmark' ),
('OglFillTexSingle', 'micro-benchmark' ),
('OglGeomPoint', 'micro-benchmark' ),
('OglGeomTriList', 'micro-benchmark' ),
('OglGeomTriStrip', 'micro-benchmark' ),
('OglHdrBloom', 'micro-benchmark' ),
('OglMultithread', 'micro-benchmark' ),
('OglPSBump2', 'micro-benchmark' ),
('OglPSBump8', 'micro-benchmark' ),
('OglPSPhong', 'micro-benchmark' ),
('OglPSPom', 'micro-benchmark' ),
('OglShMapPcf', 'micro-benchmark' ),
('OglShMapVsm', 'micro-benchmark' ),
('OglTerrainFlyInst', 'micro-benchmark' ),
('OglTerrainPanInst', 'micro-benchmark' ),
('OglTerrainFlyTess', 'micro-benchmark' ),
('OglTerrainPanTess', 'micro-benchmark' ),
('OglTexFilterAniso', 'micro-benchmark' ),
('OglTexFilterTri', 'micro-benchmark' ),
('OglTexMem128', 'micro-benchmark' ),
('OglTexMem512', 'micro-benchmark' ),
('OglVSDiffuse1', 'micro-benchmark' ),
('OglVSDiffuse8', 'micro-benchmark' ),
('OglVSInstancing', 'micro-benchmark' ),
('OglVSTangent', 'micro-benchmark' ),
('OglZBuffer', 'micro-benchmark' ),
('egypt', 'synthetic-benchmark'),
('egypt_o', 'synthetic-benchmark'),
('fill', 'micro-benchmark' ),
('fill_o', 'micro-benchmark' ),
('fur', 'micro-benchmark' ),
('heaven', 'engine-demo' ),
('manhattan', 'synthetic-benchmark'),
('manhattan_o', 'synthetic-benchmark'),
('car_chase', 'synthetic-benchmark'),
('car_chase_o', 'synthetic-benchmark'),
('tess', 'synthetic-benchmark'),
('tess_o', 'synthetic-benchmark'),
('plot3d', 'micro-benchmark' ),
('trex', 'synthetic-benchmark'),
('trex_o', 'synthetic-benchmark'),
('triangle', 'micro-benchmark' ),
('valley', 'engine-demo' ),
('warsow', 'game-demo' ),
('xonotic', 'game-demo' ),
]
# pylint: enable=bad-whitespace
class _Getter(object):
"""A container for making working with benchmark data easier.
Stores dictionaries relating each element to each other, allowing for fast
searches.
"""
def __init__(self):
        self.by_name = dict(_BENCHMARKS)
self.by_category = {c: [n[0] for n in b] for c, b in itertools.groupby(
sorted(_BENCHMARKS, key=lambda x: x[1]), lambda x: x[1])}
GETTER = _Getter()
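# For example (given the table above), GETTER.by_name['warsow'] is 'game-demo'
# and GETTER.by_category['game-demo'] is ['warsow', 'xonotic'].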
@APP.route('/')
def front():
return render_template('index.html.mako', getter=GETTER)
@APP.route('/apps/all')
def all(): # pylint: disable=redefined-builtin
return render_template('apps.html.mako', benchmarks=dict(_BENCHMARKS),
category="All Benchmarks")
@APP.route('/apps/<benchmark>')
def apps(benchmark):
return render_template(
'apps.html.mako',
benchmarks=[benchmark],
category=None)
@APP.route('/categories/<category>')
def categories(category):
return render_template(
'apps.html.mako',
benchmarks=GETTER.by_category[category],
category=category)
if __name__ == '__main__':
APP.run()
|
Python
|
CL
|
99c9dba84edfec000f18301bc35e946b73a44ecfce4e3c9432552967c61997e4
|
#-------------------------------------------------------------------------------
#
# simple tool comparing XML documents - powered by Python miniDOM
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
#
""" Simple XML documets' comparator. """
import xml.dom.minidom as dom
from django.utils.six import string_types
# define node types
ELEMENT_NODE = dom.Element.ELEMENT_NODE
ATTRIBUTE_NODE = dom.Element.ATTRIBUTE_NODE
TEXT_NODE = dom.Element.TEXT_NODE
CDATA_SECTION_NODE = dom.Element.CDATA_SECTION_NODE
ENTITY_REFERENCE_NODE = dom.Element.ENTITY_REFERENCE_NODE
ENTITY_NODE = dom.Element.ENTITY_NODE
PROCESSING_INSTRUCTION_NODE = dom.Element.PROCESSING_INSTRUCTION_NODE
COMMENT_NODE = dom.Element.COMMENT_NODE
DOCUMENT_NODE = dom.Element.DOCUMENT_NODE
DOCUMENT_TYPE_NODE = dom.Element.DOCUMENT_TYPE_NODE
DOCUMENT_FRAGMENT_NODE = dom.Element.DOCUMENT_FRAGMENT_NODE
NOTATION_NODE = dom.Element.NOTATION_NODE
# define note type to string conversion
NODE_DICT = {
ELEMENT_NODE : "ELEMENT_NODE",
ATTRIBUTE_NODE : "ATTRIBUTE_NODE",
TEXT_NODE : "TEXT_NODE",
CDATA_SECTION_NODE : "CDATA_SECTION_NODE",
ENTITY_REFERENCE_NODE : "ENTITY_REFERENCE_NODE",
ENTITY_NODE : "ENTITY_NODE",
PROCESSING_INSTRUCTION_NODE : "PROCESSING_INSTRUCTION_NODE",
COMMENT_NODE : "COMMENT_NODE",
DOCUMENT_NODE : "DOCUMENT_NODE",
DOCUMENT_TYPE_NODE : "DOCUMENT_TYPE_NODE",
DOCUMENT_FRAGMENT_NODE : "DOCUMENT_FRAGMENT_NODE",
NOTATION_NODE : "NOTATION_NODE",
}
# exceptions
class XMLError( Exception ) :
""" XML base error error """
class XMLParseError( XMLError ) :
""" XML parse error """
class XMLMismatchError( XMLError ) :
""" XML mismatch error """
#-------------------------------------------------------------------------------
# low level utilities
def _getNodeName( node ) :
""" get full node name in '{namespace}tagName' format """
if ( node.namespaceURI is None ) :
return node.nodeName
else :
return "{%s}%s"%( node.namespaceURI , node.localName )
def _packName( pair ) :
""" pack the (<namespace>,<localname>) tuple to curly bracket notation
{<namespace>}<localname> """
if ( pair[0] is None ) :
return pair[1]
else :
return "{%s}%s"%(pair[0],pair[1])
def _skipIgnorable( node , path ) :
""" get node sibling skipping empty text nodes and comments """
while ( node is not None ) :
        # expected nodes - return immediately
if node.nodeType in (ELEMENT_NODE,CDATA_SECTION_NODE): break
# special treatment of text nodes - ignore blank text
if node.nodeType == TEXT_NODE :
# ignore blank text
if 0 < len( node.wholeText.strip() ) : break
# unexpected nodes - raise exception
if node.nodeType in (ATTRIBUTE_NODE,DOCUMENT_NODE,DOCUMENT_FRAGMENT_NODE,
NOTATION_NODE,ENTITY_REFERENCE_NODE,ENTITY_NODE,DOCUMENT_TYPE_NODE):
raise XMLParseError("Unexpected child node '%s' ! PATH='%s'" % (NODE_DICT[node.nodeType],path))
# the rest is just ignored
#if node.nodeType in (COMMENT_NODE,PROCESSING_INSTRUCTION_NODE) : pass
node = node.nextSibling
return node
def _compareAttributes( a0 , a1 , level , path , verbose = False ) :
# both nodes have no attributes
if ( a0 is None ) and ( a1 is None ) : return
#attribute mismatch
if ( a0 is None ) or ( a1 is None ) :
raise XMLMismatchError("Attribute mismatch! PATH=\"%s\""%path)
# get list of attributes and filter-out namespace definitions
isNotNS = lambda v : ( v[0][0] != "http://www.w3.org/2000/xmlns/" )
packName = lambda v : ( _packName(v[0]) , v[1].strip() )
items0 = sorted( map( packName , filter( isNotNS , a0.itemsNS() ) ) )
items1 = sorted( map( packName , filter( isNotNS , a1.itemsNS() ) ) )
    if len( items0 ) != len( items1 ) :
if verbose :
for item in items0 :
print (" < \t %s@%s=\"%s\"" %( path , item[0] , item[1] ))
for item in items1 :
print (" > \t %s@%s=\"%s\"" %( path , item[0] , item[1] ))
raise XMLMismatchError("Attribute count mismatch! PATH=\"%s\""%path)
for pair in zip( items0 , items1 ) :
if verbose :
print (" < \t %s@%s=\"%s\"" %( path , pair[0][0] , pair[0][1] ))
print (" > \t %s@%s=\"%s\"" %( path , pair[1][0] , pair[1][1] ))
if ( pair[0] != pair[1]) :
raise XMLMismatchError("Attribute mismatch! PATH=\"%s\""%path)
def _compareNode( n0 , n1 , level = 0 , path = "/" , verbose = False ) :
""" compare DOM node or element subtree """
#nn0 , nn1 = _getNodeName( n0 ), _getNodeName( n1 )
nn0 , nn1 = n0.nodeName, n1.nodeName
path0 = "%s/%s"%( path , nn0 ) if level > 1 else "/%s"%nn0 if level == 1 else _getNodeName( n0 )
path1 = "%s/%s"%( path , nn1 ) if level > 1 else "/%s"%nn1 if level == 1 else _getNodeName( n0 )
if verbose :
print ("< \t %s" %( path0 ))
print ("> \t %s" %( path1 ))
# compare node name and node type
if (( n0.nodeType != n1.nodeType )
or ( _getNodeName( n0 ) != _getNodeName( n1 ) )):
raise XMLMismatchError("Node mismatch! PATH0=\"%s\" vs. PATH1=\"%s\""%(path0,path1))
# compare attributes
_compareAttributes( n0.attributes , n1.attributes , level , path0 , verbose )
# in case of text-nodes and CDATA section check the content
if n0.nodeType == TEXT_NODE :
if verbose :
print (" < TEXT: \t \"%s\"" % n0.wholeText.strip())
print (" > TEXT: \t \"%s\"" % n1.wholeText.strip())
if n0.wholeText.strip() != n1.wholeText.strip() :
raise XMLMismatchError("Text mismatch! PATH=\"%s\""%(path))
return
if n0.nodeType == CDATA_SECTION_NODE :
if verbose :
print (" < CDATA: \t \"%s\"" % n0.wholeText)
print (" > CDATA: \t \"%s\"" % n1.wholeText)
if n0.wholeText != n1.wholeText :
raise XMLMismatchError("CDATA mismatch! PATH=\"%s\""%(path))
return
# get first child
    nn0 = _skipIgnorable( n0.firstChild , path )
    nn1 = _skipIgnorable( n1.firstChild , path )
while ( nn0 is not None ) and ( nn1 is not None ) :
# sublevel comparison
_compareNode( nn0 , nn1 , level+1 , path0 , verbose )
#get next sibling
nn0 = _skipIgnorable( nn0.nextSibling , path )
nn1 = _skipIgnorable( nn1.nextSibling , path )
# make sure there are no remaining nodes
if not (( nn0 is None ) and ( nn1 is None )) :
raise XMLMismatchError("Childern count mismatch! PATH=\"%s\""%path0)
#-------------------------------------------------------------------------------
def xmlCompareDOMs( xml0 , xml1 , verbose = False ) :
""" Compare two XML documents passed as DOM trees (xml.dom.minidom)."""
return _compareNode( xml0 , xml1 , verbose = verbose )
def xmlCompareStrings( str0 , str1 , verbose = False ) :
""" Compare two XML documents passed as strings. """
def parse( src , label ) :
try :
return dom.parseString( src )
except Exception as e :
raise XMLParseError("Failed to parse %s XML string! %s" % ( label , str(e) ))
return xmlCompareDOMs( parse(str0,"the first") , parse(str1,"the second") , verbose )
def xmlCompareFiles( src0 , src1 , verbose = False ) :
""" Compare two XML documents passed as filenames, file or file-like objects."""
def parseFileName( src ) :
try :
with open( src ) as fid :
return dom.parse( fid )
except Exception as e :
raise XMLParseError("Failed to parse the \"%s\" file! %s" % ( src , str(e) ))
def parseFileObj( src , label ) :
try :
return dom.parse( src )
except Exception as e :
raise XMLParseError("Failed to parse the %s XML file(-like) object! %e" % ( label , str(e) ))
def parse( src , label ) :
        return parseFileName( src ) if isinstance( src , string_types ) else parseFileObj( src , label )
return xmlCompareDOMs( parse(src0,"the first") , parse(src1,"the second") , verbose )
#-------------------------------------------------------------------------------
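# A minimal usage sketch (an assumption, not part of the original module):
# the first pair differs only in ignorable whitespace and compares equal,
# the second differs in text content and raises XMLMismatchError.
if __name__ == '__main__' :
    xmlCompareStrings( "<a><b>x</b></a>" , "<a> <b>x</b> </a>" )
    try :
        xmlCompareStrings( "<a><b>x</b></a>" , "<a><b>y</b></a>" )
    except XMLMismatchError as e :
        print ( "mismatch detected: %s" % str(e) )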
|
Python
|
CL
|
0fd31dc9e670273799c8b4bb61f9dad6dd1fcc51d749873a89d84773feb688df
|
#!/usr/bin/python
# A set is a collection which is unordered and unindexed. In Python, sets are written with curly brackets.
s = {"a", "b", "c"}
print(s)
for x in s:
print(x) # iterating the set
print("banana" in s) # check for presence of item
s.add("d") # add item to set
s.update(["e", "f", "g"]) # adding multiple items
s.remove("c") # remove item ... if not present, error
s.discard("d") # remove item ... if not present, no error
x = s.pop() # removes and returns an arbitrary item; you can't choose which
print(x) # so the removed item is stored in a var
s.clear() # to empty the set
del s # to delete the set
# add() Adds an element to the set
# clear() Removes all the elements from the set
# copy() Returns a copy of the set
# difference() Returns a set containing the difference between two or more sets
# difference_update() Removes the items in this set that are also included in another, specified set
# discard() Remove the specified item
# intersection() Returns a set, that is the intersection of two other sets
# intersection_update() Removes the items in this set that are not present in other, specified set(s)
# isdisjoint() Returns True if the two sets have no items in common
# issubset() Returns whether another set contains this set or not
# issuperset() Returns whether this set contains another set or not
# pop() Removes an element from the set
# remove() Removes the specified element
# symmetric_difference() Returns a set with the symmetric differences of two sets
# symmetric_difference_update() inserts the symmetric differences from this set and another
# union() Return a set containing the union of sets
# update() Update the set with the union of this set and others
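# A short demonstration of the operations listed above (a sketch with
# illustrative values):
a = {1, 2, 3}
b = {3, 4, 5}
print(a.union(b))                 # {1, 2, 3, 4, 5}
print(a.intersection(b))          # {3}
print(a.difference(b))            # {1, 2}
print(a.symmetric_difference(b))  # {1, 2, 4, 5}
print(a.isdisjoint({9}))          # True: no items in common
print({1, 2}.issubset(a))         # True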
|
Python
|
CL
|
c110c304baf1fdae85234831e1379488104a66d27dc10b5d65b3ec0768b1172c
|
# -*- coding: UTF-8 -*-
import queue
import sys
from select import select
from absEvent import AbsEvent
import socket
class XtSelect:
def __init__(self, ip='0.0.0.0', port=8090):
self.ip = ip
self.port = port
self.server_socket = socket.socket()
self.output_list = []
        self.input_list = []
        # map each client socket to its queue of outgoing messages
        self.message_queue = {}
        self.create_socket()
self.ep_read()
    def create_socket(self):
        try:
            # SO_REUSEADDR lets the port be reused right after it is released
            self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server_address = (self.ip, self.port)
            self.server_socket.bind(server_address)
            self.server_socket.listen(10)
            self.server_socket.setblocking(False)
            # add the server socket to the listening list
            self.input_list.append(self.server_socket)
        except Exception as e:
            sys.stdout.write('Failed to create socket! %s\n' % str(e))
    def ep_read(self):
        while True:
            # select blocks until one of the monitored sockets is ready
            stdinput, stdoutput, stderr = select(self.input_list, self.output_list, self.input_list)
            for obj in stdinput:
                if obj == self.server_socket:
                    # accept the client connection; get the client socket and address
                    conn, addr = self.server_socket.accept()
                    # monitor the client socket so select fires when it sends data
                    self.input_list.append(conn)
                    self.message_queue[conn] = queue.Queue()
                    AbsEvent.ev_connect(conn)
                else:
                    # a client socket in input_list triggered: the client sent
                    # data or closed the connection
                    try:
                        if obj not in self.output_list:
                            recvs = obj.recv(1024)
                            if not recvs:
                                # the client disconnected; stop monitoring it
                                self.input_list.remove(obj)
                                self.message_queue.pop(obj, None)
                                AbsEvent.ev_hup(obj)
                            else:
                                print(recvs)
                                self.output_list.append(obj)
                                AbsEvent.ev_read(obj)
                    except ConnectionResetError:
                        # the client disconnected; stop monitoring it
                        self.input_list.remove(obj)
                        self.message_queue.pop(obj, None)
                        print("\n[input] Client disconnected")
            # no client requests or incoming messages pending: flush the
            # outgoing message queues
            for sendobj in self.output_list:
                try:
                    # if this client's queue has a message, send it
                    if not self.message_queue[sendobj].empty():
                        send_data = self.message_queue[sendobj].get()
                        sendobj.sendall(send_data)
                    else:
                        # nothing queued; stop watching until the next message
                        self.output_list.remove(sendobj)
                except ConnectionResetError:
                    # the client connection was reset
                    del self.message_queue[sendobj]
                    self.output_list.remove(sendobj)
                    print("\n[output] Client disconnected")
if __name__ == '__main__':
st = XtSelect()
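# A minimal client sketch for manual testing (an assumption, not part of the
# original module); run it in a separate process while the server is running:
#
#   import socket
#   client = socket.socket()
#   client.connect(('127.0.0.1', 8090))
#   client.sendall(b'hello')
#   client.close()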
|
Python
|
CL
|
e17bb54b20df582979456117fa387d906003ebf73a1c050b12ec7af7a689fb70
|
"""
username1: cat dog
username2: elephant fox tiger duck
username1: giraffe
hi this is joydeep
logfile
list username, message type
find the k most talkative users
number of words each user types
param k is also given
edge cases:
the file empty?
messages are sep by any whitespace
size of the input is m
no. of users is n
algo:
init a hashmap of the {users: count}
read the file line by line and then update the count corr to the users
once the full file is read then create a heap of the the counts with the users
then get the first k elements from the heap
going through the file
time complex: O(n * characters in the line)
space complex: O(n)
getting the top k:
time complexity: O(klogn)
space complexity: O(n)
"""
from heapq import heapify, heappush, heappop
from collections import defaultdict
class FileReader:
"""
A file reader to implement some file methods
Args:
filename (str): path to the filename
"""
def __init__(self, filename):
self.filename = filename
    def get_username_words(self, line):
        """Split a log line into (username, message) at the first ':'.
        Falls back to an empty message if no ':' is present."""
        for i, ch in enumerate(line):
            if ch == ":":
                if i + 1 < len(line):
                    return line[:i], line[i+1:]
                else:
                    return line[:i], ""
        return line, ""
def get_usercounts(self):
"""
Get the word counts from the file for the users
going through the file
time complex: O(n * characters in the line)
space complex: O(n)
Returns
dict: username -> counts
"""
word_counts = defaultdict(int) # {}
with open(self.filename) as f:
for line in f:
if line:
username, words = self.get_username_words(line) # username1, cat dog
num_words = len(words.split()) # 1
word_counts[username] += num_words # {u1: 3, u2: 4, }
return word_counts
    def k_most_talkative(self, k):
        """
        Get the k most talkative users.
        Getting the top k:
            time complexity: O(n + k log n)
            space complexity: O(n)
        Args:
            k (int): number of users to yield
        Yields:
            username (str)
        """
        word_counts = self.get_usercounts()  # {u1: 3, u2: 4, }
        word_counts_heap = [(-count, username) for username, count in word_counts.items()]  # [(-4, username), (-3, username)]
        heapify(word_counts_heap)  # [(-4, u2), (-3, u1)]
        counter = 0
        while word_counts_heap and counter < k:
            _, username = heappop(word_counts_heap)
            counter += 1  # 1, 2
            yield username  # u2, u1
"""
username1: cat dog
username2: elephant fox tiger duck
username1: giraffe
username1: cat dog
username2: elephant fox tiger
username1: giraffe
"""
|
Python
|
CL
|
38e88c75e09a08e041342cef208eb67767c443c0d0c9a0d3e5e71ee6c677f63c
|
import glob
import os
import argparse
import tqdm
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.utils import data
from torch.optim import SGD, lr_scheduler
from torch.backends import cudnn
from torchvision import transforms
from torchvision.datasets import CIFAR10
#from fast_adv.models.cifar10.model_mixed_attention import wide_resnet
from fast_adv.models.cifar10.model_attention import wide_resnet
from fast_adv.utils import AverageMeter, save_checkpoint, requires_grad_, NormalizedModel, VisdomLogger
from fast_adv.attacks import DDN
image_mean = torch.tensor([0.491, 0.482, 0.447]).view(1, 3, 1, 1)
image_std = torch.tensor([0.247, 0.243, 0.262]).view(1, 3, 1, 1)
DEVICE = torch.device('cuda:0' if (torch.cuda.is_available() ) else 'cpu')
test_transform = transforms.Compose([
transforms.ToTensor(),
])
input='./data/cifar10'
#path='/media/wanghao/000F5F8400087C68/CYJ-5-29/DDN/fast_adv/attacks/DeepFool'
#test_set=CIFAR10(input, train=False, transform=test_transform, download=True)
test_set = data.Subset(CIFAR10(input, train=True, transform=test_transform, download=True),list(range(0,30000)))
train_set = data.Subset(CIFAR10(input, train=True, transform=test_transform, download=True),list(range(48000, 50000)))
test_loader = data.DataLoader(train_set, batch_size=100, shuffle=False, num_workers=2, pin_memory=True)
m = wide_resnet(num_classes=10, depth=28, widen_factor=10, dropRate=0.3)
model = NormalizedModel(model=m, mean=image_mean, std=image_std).to(DEVICE) # keep images in the [0, 1] range
weight_norm='/media/unknown/Data/PLP/fast_adv/defenses/weights/best/2Norm_cifar10_ep_184_val_acc0.9515.pth'
weight_AT='./weights/best/2AT_cifar10_ep_13_val_acc0.8770.pth'
weight_ALP='/media/unknown/Data/PLP/fast_adv/defenses/weights/AT+ALP/cifar10acc0.8699999809265136_50.pth'
weight_conv_mixatten='/media/unknown/Data/PLP/fast_adv/defenses/weights/cifar10_mixed_Attention/cifar10acc0.8759999752044678_100.pth'
weight_attention='/media/unknown/Data/PLP/fast_adv/defenses/weights/cifar10_Attention/cifar10acc0.8729999780654907_120.pth'
weight_smooth='/media/unknown/Data/PLP/fast_adv/defenses/weights/best/2random_smooth_cifar10_ep_120_val_acc0.8510.pth'
weight_025smooth='/media/unknown/Data/PLP/fast_adv/defenses/weights/best/0.25random_smooth_cifar10_ep_146_val_acc0.8070.pth'
weight_05smooth='/media/unknown/Data/PLP/fast_adv/defenses/weights/shape_0.5_random/cifar10acc0.6944999784231186_50.pth'
weight_025conv_mixatten='/media/unknown/Data/PLP/fast_adv/defenses/weights/best/0.25MixedAttention_mixed_attention_cifar10_ep_50_val_acc0.8720.pth'
weight_05conv_mixatten='/media/unknown/Data/PLP/fast_adv/defenses/weights/shape_0.5_cifar10_mixed_Attention/cifar10acc0.8434999763965607_130.pth'
weight_1conv_mixatten='/media/unknown/Data/PLP/fast_adv/defenses/weights/best/1MixedAttention_mixed_attention_cifar10_ep_25_val_acc0.7080.pth'
weight_025conv_mixatten_ALP = '/media/unknown/Data/PLP/fast_adv/defenses/weights/best/0.25Mixed+ALP_cifar10_ep_85_val_acc0.8650.pth'
model_dict = torch.load(weight_AT)
model.load_state_dict(model_dict)
model.eval()
with torch.no_grad():
    # accumulate accuracy across all batches instead of resetting the meter
    # on every iteration
    test_accs = AverageMeter()
    test_losses = AverageMeter()
    for i, (images, labels) in enumerate(tqdm.tqdm(test_loader, ncols=80)):
        images, labels = images.to(DEVICE), labels.to(DEVICE)
        noise = torch.randn_like(images, device='cuda') * 0  # an l2 norm of 1 corresponds to norm=10
        #print(torch.norm(noise))
        image_shape = images + noise
        #image_shape = torch.renorm(image_shape - images, p=2, dim=0, maxnorm=1) + images
        #logits,_ = model.forward_attention(images.detach(), image_shape.detach())
        logits = model(image_shape.detach())
        #logits = model(image_shape)
        test_accs.append((logits.argmax(1) == labels).float().mean().item())
        # test_losses.append(loss.item())
    print('\nTest accuracy ', test_accs.avg)
|
Python
|
CL
|
ef9a366119ecac48c120745d0d1947d1393d32d423b503b9236811e5bca76df3
|
import os
import json
import pandas as pd
import numpy as np
import torch
from pathlib import Path
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
from sklearn.model_selection import train_test_split
from transformers import AdamW, T5ForConditionalGeneration, T5TokenizerFast
from tqdm.auto import tqdm
os.environ["TOKENIZERS_PARALLELISM"] = "true"
df = pd.read_csv("/home/felix/Desktop/DeepLearning-pyTorch/Lightning_learning/data/news_summary.csv", encoding="latin-1")
df = df[["text", "ctext"]]
df.columns = ["summary", "text"]
print(df.head())
df = df.dropna()
train_df, test_df = train_test_split(df, test_size=0.15)
class NewsSummaryDataset(Dataset):
def __init__(self, data: pd.DataFrame, tokenizer: T5TokenizerFast, text_max_token_len = 512, summary_max_token_len = 128):
self.tokenizer = tokenizer
self.data = data
self.text_max_token_len = text_max_token_len
self.summary_max_token_len = summary_max_token_len
def __len__(self):
return len(self.data)
def __getitem__(self, index: int):
data_row = self.data.iloc[index]
text = data_row["text"]
text_encoding = self.tokenizer(text,
max_length=self.text_max_token_len,
padding='max_length',
truncation=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors='pt'
)
        summary_encoding = self.tokenizer(data_row["summary"],
max_length=self.summary_max_token_len,
padding='max_length',
truncation=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors='pt'
)
labels = summary_encoding["input_ids"]
labels[labels == 0] = -100
return dict(text=text,
summary=data_row["summary"],
text_input_ids=text_encoding["input_ids"],
text_attention_mask=text_encoding["attention_mask"].flatten(),
labels=labels.flatten(),
labels_attention_mask=summary_encoding["attention_mask"].flatten()
)
class NewsSummaryDataModule(pl.LightningDataModule):
def __init__(self, train_df: pd.DataFrame, test_df: pd.DataFrame, tokenizer: T5TokenizerFast, batch_size: int = 8, text_max_token_len: int = 512, summary_max_token_len: int = 128):
super().__init__()
self.train_df = train_df
self.test_df = test_df
self.batch_size = batch_size
self.tokenizer = tokenizer
self.text_max_token_len = text_max_token_len
self.summary_max_token_len = summary_max_token_len
def setup(self, stage=None):
self.train_dataset = NewsSummaryDataset(
self.train_df,
self.tokenizer,
self.text_max_token_len,
self.summary_max_token_len
)
self.test_dataset = NewsSummaryDataset(
self.test_df,
self.tokenizer,
self.text_max_token_len,
self.summary_max_token_len
)
def train_dataloader(self):
return DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=8
)
def val_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=8
)
def test_dataloader(self):
return DataLoader(
self.test_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=8
)
MODEL_NAME = 't5-base'
tokenizer = T5TokenizerFast.from_pretrained(MODEL_NAME)
text_token_counts, summary_token_counts = [], []
for _, row in train_df.iterrows():
text_token_count = len(tokenizer.encode(row["text"]))
text_token_counts.append(text_token_count)
summary_token_count = len(tokenizer.encode(row["summary"]))
summary_token_counts.append(summary_token_count)
print('Text Tokens :' + str(sum(text_token_counts)))
print('Summary Tokens :' + str(sum(summary_token_counts)))
N_EPOCHS = 3
BATCH_SIZE = 8
data_module = NewsSummaryDataModule(train_df, test_df, tokenizer, batch_size=BATCH_SIZE)
# MODEL
class NewsSummaryModel(pl.LightningModule):
def __init__(self):
super().__init__()
self.model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME, return_dict=True)
    def forward(self, input_ids, attention_mask, decoder_attention_mask, labels=None):
        # with return_dict=True the model returns a ModelOutput; read the
        # loss and logits by attribute instead of tuple-unpacking
        output = self.model(input_ids,
                            attention_mask=attention_mask,
                            labels=labels,
                            decoder_attention_mask=decoder_attention_mask
                            )
        return output.loss, output.logits
def training_step(self, batch, batch_idx):
input_ids = batch["text_input_ids"]
attention_mask = batch["text_attention_mask"]
labels = batch["labels"]
labels_attention_mask = batch["labels_attention_mask"]
loss, outputs = self(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_attention_mask=labels_attention_mask,
labels=labels
)
self.log("train_loss", loss, prog_bar=True, logger=True)
return loss
def validation_step(self, batch, batch_idx):
input_ids = batch["text_input_ids"]
attention_mask = batch["text_attention_mask"]
labels = batch["labels"]
labels_attention_mask = batch["labels_attention_mask"]
loss, outputs = self(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_attention_mask=labels_attention_mask,
labels=labels
)
self.log("val_loss", loss, prog_bar=True, logger=True)
return loss
def test_step(self, batch, batch_idx):
input_ids = batch["text_input_ids"]
attention_mask = batch["text_attention_mask"]
labels = batch["labels"]
labels_attention_mask = batch["labels_attention_mask"]
loss, outputs = self(
input_ids=input_ids,
attention_mask=attention_mask,
decoder_attention_mask=labels_attention_mask,
labels=labels
)
self.log("test_loss", loss, prog_bar=True, logger=True)
return loss
def configure_optimizers(self):
return AdamW(self.parameters(), lr=0.0001)
model = NewsSummaryModel()
checkpoint_callback = ModelCheckpoint(
dirpath="checkpoints",
filename="best-checkpoint",
save_top_k=1,
verbose=True,
monitor="val_loss",
mode="min"
)
logger = TensorBoardLogger(save_dir="Lightning_logs", name="news_summary")
early_stopping_callback = EarlyStopping(
monitor="val_loss",
min_delta=0.01,
patience=2,
verbose=True
)
trainer = pl.Trainer(logger=logger,
callbacks=[checkpoint_callback, early_stopping_callback],
max_epochs=N_EPOCHS,
gpus=0,
progress_bar_refresh_rate=1
)
trainer.fit(model, data_module)
trained_model = NewsSummaryModel.load_from_checkpoint(
trainer.checkpoint_callback.best_model_path
)
trained_model.freeze()
def summarize(text):
text_encoding = tokenizer(
text,
max_length=512,
padding="max_length",
truncation=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors="pt"
)
generated_ids = trained_model.model.generate(
input_ids=text_encoding["input_ids"],
attention_mask=text_encoding["attention_mask"],
max_length=150,
num_beams=2,
repetition_penalty=2.5,
length_penalty=1.0,
early_stopping=True
)
preds = [
tokenizer.decode(gen_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
for gen_id in generated_ids
]
return "".join(preds)
sample_row = test_df.iloc[0]
text = sample_row["text"]
model_summary = summarize(text)
print(model_summary)
|
Python
|
CL
|
1f1d21706947ad7426f41e3a40f63e0bc78f4049c69e0314c351d6aeb06bf969
|
__author__ = 'Aubrey'
import copy
from copy import deepcopy
import numpy as np
from numpy.linalg import norm
import method
from preprocessing import NanLabelEncoding, NanLabelBinarizer
from data import data as data_lib
from sklearn.neighbors import KernelDensity
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from results_class.results import Output
import cvxpy as cvx
import scipy
from configs.base_configs import MethodConfigs
class TargetTranfer(method.Method):
def __init__(self, configs=MethodConfigs()):
super(TargetTranfer, self).__init__(configs)
self.base_learner = method.SKLLogisticRegression(configs)
self.cv_params = {}
self.base_learner.experiment_results_class = self.experiment_results_class
def train(self, data):
self.base_learner.train_and_test(data)
def train_and_test(self, data):
data_copy = self._prepare_data(data,include_unlabeled=True)
results = super(TargetTranfer, self).train_and_test(data_copy)
return results
def _prepare_data(self, data, include_unlabeled=True):
target_labels = self.configs.target_labels
data_copy = data.get_transfer_subset(target_labels,include_unlabeled=include_unlabeled)
data_copy = data_copy.get_subset(data_copy.is_target)
#TODO: Not sure why I was doing this before
#is_source = ~data_copy.has_true_label(target_labels)
#data_copy.type[is_source] = data_lib.TYPE_SOURCE
#data_copy.is_train[is_source] = True
#data_copy = data.get_with_labels(target_labels)
return data_copy
def predict(self, data):
o = self.base_learner.predict(data)
if self.label_transform is not None:
o.true_y = self.label_transform.transform(o.true_y)
return o
@method.Method.estimated_error.getter
def estimated_error(self):
return self.base_learner.estimated_error
@property
def prefix(self):
return 'TargetTransfer+' + self.base_learner.prefix
class FuseTransfer(TargetTranfer):
def __init__(self, configs=MethodConfigs()):
super(FuseTransfer, self).__init__(configs)
self.use_oracle = False
#self.target_weight_scale = None
self.target_weight_scale = .75
self.label_transform = NanLabelBinarizer()
def train(self, data):
is_labeled_train = data.is_labeled & data.is_train
        n_labeled_target = (data.is_target & is_labeled_train).sum()
        n_labeled_source = (data.is_source & is_labeled_train).sum()
data.instance_weights = np.ones(data.n)
if self.target_weight_scale is not None:
assert 0 <= self.target_weight_scale <= 1
data.instance_weights[data.is_source] /= n_labeled_source
data.instance_weights[data.is_target] /= n_labeled_target
data.instance_weights[data.is_target] *= self.target_weight_scale
data.instance_weights[data.is_source] *= (1-self.target_weight_scale)
y_old = data.y
if self.label_transform is not None:
data.y = self.label_transform.fit_transform(data.y)
super(FuseTransfer, self).train(data)
data.y = y_old
def _prepare_data(self, data,include_unlabeled=True):
source_labels = self.configs.source_labels
target_labels = self.configs.target_labels
data_copy = copy.deepcopy(data)
if data.data_set_ids is not None:
#assert source_labels is None
#assert target_labels is None
#data_copy.type[data_copy.data_set_ids > 0] = data_lib.TYPE_SOURCE
for i in source_labels:
data_copy.type[data_copy.data_set_ids == i] = data_lib.TYPE_SOURCE
return data_copy
#source_inds = array_functions.find_set(data_copy.true_y,source_labels)
if self.use_oracle:
oracle_labels = self.configs.oracle_labels
data_copy = data_copy.get_transfer_subset(
np.concatenate((oracle_labels.ravel(),target_labels.ravel())),
include_unlabeled=True
)
data_copy.data_set_ids = np.zeros(data_copy.n)
for i, s in enumerate(source_labels):
source_inds = data_copy.get_transfer_inds(s)
if not data_copy.is_regression:
data_copy.change_labels(s, target_labels)
data_copy.type[source_inds] = data_lib.TYPE_SOURCE
data_copy.is_train[source_inds] = True
data_copy.data_set_ids[source_inds] = i+1
if getattr(self, 'use_all_source', False):
data_copy.reveal_labels(data_copy.is_source)
return data_copy
@property
def prefix(self):
s = 'FuseTransfer+' + self.base_learner.prefix
if 'target_weight_scale' in self.__dict__ and self.target_weight_scale is not None:
s += '-tws=' + str(self.target_weight_scale)
if 'use_oracle' in self.__dict__ and self.use_oracle:
s += '-Oracle'
return s
class StackingTransfer(FuseTransfer):
def __init__(self, configs=MethodConfigs()):
super(StackingTransfer, self).__init__(configs)
#from far_transfer_methods import GraphTransferNW
self.base_learner = method.SKLRidgeRegression(deepcopy(configs))
self.source_learner = method.NadarayaWatsonMethod(deepcopy(configs))
self.target_learner = method.NadarayaWatsonMethod(deepcopy(configs))
self.joint_cv = getattr(configs, 'joint_cv', False)
self.only_use_source_prediction = False
self.use_all_source = True
self.source_only = False
self.target_only = False
self.just_bias = False
self.linear_source = False
if self.target_only or self.source_only or self.linear_source:
self.joint_cv = False
if self.just_bias:
self.base_learner.cv_params = {
'alpha': [1e16]
}
self.joint_cv = False
if self.linear_source:
self.target_learner.cv_params = {
'sigma': [1]
}
if self.joint_cv:
self.cv_params = self.base_learner.cv_params.copy()
self.cv_params.update(self.target_learner.cv_params)
self.base_learner.cv_params = None
self.target_learner.cv_params = None
sub_configs = deepcopy(configs)
#self.source_learner = method.NadarayaWatsonKNNMethod(deepcopy(sub_configs))
#self.target_learner = method.NadarayaWatsonKNNMethod(deepcopy(sub_configs))
self.use_validation = configs.use_validation
def _switch_labels(self, x, old, new):
x_new = deepcopy(x)
for o, n in zip(old[:], new[:]):
x_new[x == o] = n
return x_new
def _get_stacked_data(self, data):
y_source = np.expand_dims(self.source_learner.predict(data).y, 1)
y_target = np.expand_dims(self.target_learner.predict(data).y, 1)
if not data.is_regression:
classes = data.classes
new_classes = np.asarray([0,1])
y_source = self._switch_labels(y_source, classes, new_classes)
y_target = self._switch_labels(y_target, classes, new_classes)
if self.linear_source:
y_target[:] = 0
x = np.hstack((y_target, y_source))
data_stacked = deepcopy(data)
data_stacked.x = x
return data_stacked
def train(self, data):
if data.n_train_labeled == 0 or self.source_only:
self.only_use_source_prediction = True
return
if self.joint_cv:
self.target_learner.set_params(sigma=self.sigma)
self.base_learner.set_params(alpha=self.alpha)
self.target_learner.train(data)
else:
self.target_learner.train_and_test(data)
self.only_use_source_prediction = False
if self.target_only:
return
#Need unlabeled data if using validation data for parameter tuning
#I = data.is_labeled & data.is_target
I = data.is_target
stacked_data = self._get_stacked_data(data).get_subset(I)
if self.joint_cv:
self.base_learner.train(stacked_data)
else:
self.base_learner.train_and_test(stacked_data)
        if not self.running_cv:
            params = {
                'source': self.source_learner.sigma,
                'target': self.target_learner.sigma,
                'lambda': self.base_learner.alpha
            }
            print 'Stacking params: ' + str(params)
def predict(self, data):
if self.only_use_source_prediction or self.source_only:
return self.source_learner.predict(data)
if self.target_only:
return self.target_learner.predict(data)
stacked_data = self._get_stacked_data(data)
return self.base_learner.predict(stacked_data)
def _prepare_data(self, data, include_unlabeled=True):
data = super(StackingTransfer, self)._prepare_data(data, include_unlabeled)
if not self.target_only:
source_data = data.get_subset(data.is_source)
if self.preprocessor is not None:
source_data = self.preprocessor.preprocess(source_data, self.configs)
self.source_learner.configs.source_labels = None
self.source_learner.configs.target_labels = None
source_data.set_target()
self.source_learner.train_and_test(source_data)
target_data = data.get_subset(data.is_target)
return target_data
@property
def prefix(self):
s = 'StackTransfer+' + self.base_learner.prefix
if self.preprocessor is not None and self.preprocessor.prefix() is not None:
s += '-' + self.preprocessor.prefix()
if getattr(self, 'source_only', False):
s += '-source'
if getattr(self, 'target_only', False):
s += '-target'
if getattr(self, 'just_bias', False):
s += '-bias'
if getattr(self, 'linear_source', False):
s += '-linearSource'
if getattr(self, 'joint_cv', False):
s += '-jointCV'
if getattr(self, 'use_validation', False):
s += '-VAL'
return s
class HypothesisTransfer(FuseTransfer):
WEIGHTS_ALL = 0
WEIGHTS_JUST_TARGET = 1
WEIGHTS_JUST_OPTIMAL = 2
WEIGHTS_JUST_FIRST = 3
def __init__(self, configs=MethodConfigs()):
super(HypothesisTransfer, self).__init__(configs)
self.cv_params = {
'C': self.create_cv_params(-5,5),
'C2': self.create_cv_params(-5, 5),
'C3': self.create_cv_params(-5, 5),
}
self.w = None
self.b = None
#self.base_source_learner = method.SKLRidgeClassification(deepcopy(configs))
self.base_source_learner = None
self.label_transform = None
self.source_w = []
self.transform = StandardScaler()
#self.transform = None
self.use_oracle = False
self.tune_C = False
#self.weight_type = HypothesisTransfer.WEIGHTS_ALL
#self.weight_type = HypothesisTransfer.WEIGHTS_JUST_TARGET
#self.weight_type = HypothesisTransfer.WEIGHTS_JUST_OPTIMAL
self.weight_type = HypothesisTransfer.WEIGHTS_JUST_FIRST
if hasattr(configs, 'weight_type'):
self.weight_type = configs.weight_type
self.oracle_data_set_ids = configs.oracle_data_set_ids
self.c_value = None
self.use_test_error_for_model_selection = configs.use_test_error_for_model_selection
if self.weight_type == HypothesisTransfer.WEIGHTS_JUST_TARGET:
del self.cv_params['C2']
del self.cv_params['C3']
self.C2 = 0
self.C3 = 0
elif not getattr(self, 'tune_C', True):
del self.cv_params['C']
self.C = 0
def train_and_test(self, data):
#data = data.get_subset(data.data_set_ids == 0)
source_labels = self.configs.source_labels
data = self._prepare_data(data)
target_data = data.get_subset(data.data_set_ids == 0)
#self.cv_params['C'] = np.zeros(1)
if self.weight_type != HypothesisTransfer.WEIGHTS_JUST_TARGET:
base_configs = deepcopy(self.configs)
base_configs.weight_type = HypothesisTransfer.WEIGHTS_JUST_TARGET
self.base_source_learner = HypothesisTransfer(base_configs)
self.base_source_learner.cv_use_data_type = False
self.base_source_learner.use_test_error_for_model_selection = False
#self.base_source_learner.cv_params['C'] = np.zeros(1)
#for i, s in enumerate(source_labels):
for data_set_id in np.unique(data.data_set_ids):
if data_set_id == 0:
continue
#source_inds = data.get_transfer_inds(s)
source_data = data.get_subset(data.data_set_ids == data_set_id)
source_data.data_set_ids[:] = 0
source_data.is_target[:] = True
self.base_source_learner.train_and_test(source_data)
best_params = self.base_source_learner.best_params
w = np.squeeze(self.base_source_learner.w)
w /= np.linalg.norm(w)
b = self.base_source_learner.b
self.source_w.append(w)
pass
ws1 = self.source_w[0]
ws2 = self.source_w[1]
target_data_copy = deepcopy(target_data)
target_data_copy.is_train[:] = True
target_data_copy.y = target_data_copy.true_y
self.base_source_learner.train_and_test(target_data_copy)
wt = np.squeeze(self.base_source_learner.w)
wt /= np.linalg.norm(wt)
d1 = norm(ws1-wt)
d2 = norm(ws2-wt)
pass
o = super(HypothesisTransfer, self).train_and_test(target_data)
print 'c: ' + str(np.squeeze(self.c_value))
return o
def estimate_c(self, data):
x = data.x[data.is_labeled & data.is_train]
if self.transform is not None:
x = self.transform.fit_transform(x)
y = data.y[data.is_labeled]
if self.label_transform is not None:
y = self.label_transform.fit_transform(y)
n = y.size
p = data.p
c = cvx.Variable(len(self.source_w))
#ws1 = self.source_w[0]
#ws2 = self.source_w[1]
ws = 0
for i, wsi in enumerate(self.source_w):
ws += wsi * c[i]
constraints = [c >= 0]
constraint_methods = {
HypothesisTransfer.WEIGHTS_JUST_OPTIMAL,
HypothesisTransfer.WEIGHTS_JUST_FIRST
}
found_first = False
if self.weight_type in constraint_methods:
for i in range(c.size[0]):
id = i + 1
is_oracle = id in self.oracle_data_set_ids
just_first = self.weight_type == HypothesisTransfer.WEIGHTS_JUST_FIRST and found_first
if is_oracle and not just_first:
found_first = True
continue
constraints.append(c[i] == 0)
loss = 0
for i in range(y.size):
xi = x[i, :]
yi = y[i]
x_mi = np.delete(x, i, axis=0)
y_mi = np.delete(y, i, axis=0)
b_mi = y_mi.mean()
A = x_mi.T.dot(x_mi) + (self.C + self.C2) * np.eye(p)
k = x_mi.T.dot(y_mi) - x_mi.T.sum(1) * b_mi + self.C2 * ws
w_mi = scipy.linalg.inv(A) * k
loss += cvx.power(w_mi.T * xi + b_mi - yi, 2)
reg = cvx.norm2(c)**2
#reg = cvx.norm2(c)
obj = cvx.Minimize(loss + self.C3 * reg)
prob = cvx.Problem(obj, constraints)
assert prob.is_dcp()
try:
prob.solve(cvx.SCS)
c_value = np.asarray(c.value)
        except Exception as e:
            print str(e)
            # fall back to zero weights, one per source hypothesis
            c_value = np.zeros(len(self.source_w))
# c_value[np.abs(c_value) <= 1e-4] = 0
# assert np.all(c_value >= 0)
c_value[c_value < 0] = 0
return c_value
def train(self, data):
x = data.x[data.is_labeled & data.is_train]
if self.transform is not None:
x = self.transform.fit_transform(x)
y = data.y[data.is_labeled]
if self.label_transform is not None:
y = self.label_transform.fit_transform(y)
n = y.size
p = data.p
self.b = y.mean()
#print str(np.squeeze(c_value))
if self.weight_type == HypothesisTransfer.WEIGHTS_JUST_TARGET:
c_value = np.zeros(len(self.source_w))
#ws1 = 0
#ws2 = 0
else:
c_value = self.estimate_c(data)
#ws1 = self.source_w[0]
#ws2 = self.source_w[1]
ws = 0
for i, wsi in enumerate(self.source_w):
ws += wsi*c_value[i]
A = x.T.dot(x) + (self.C + self.C2)*np.eye(p)
#k = x.T.dot(y) - x.T.sum(1)*self.b + self.C2*(ws1*c_value[0] + ws2*c_value[1])
k = x.T.dot(y) - x.T.sum(1) * self.b + self.C2 * ws
self.w = np.linalg.solve(A, k)
self.c_value = c_value
pass
def predict(self, data):
o = Output(data)
x = data.x
if self.transform is not None:
x = self.transform.transform(x)
y = x.dot(self.w) + self.b
#y = np.round(y)
#y[y >= .5] = 1
#y[y < .5] = 0
y = np.sign(y)
o.y = y
o.fu = y
if self.label_transform is not None:
o.true_y = self.label_transform.transform(o.true_y)
if not self.running_cv:
is_correct = (o.y == o.true_y)
mean_train = is_correct[o.is_train].mean()
mean_test = is_correct[o.is_test].mean()
mean_train_labeled = is_correct[data.is_train & data.is_labeled].mean()
pass
return o
@property
def prefix(self):
s = 'HypTransfer'
weight_type = getattr(self, 'weight_type', HypothesisTransfer.WEIGHTS_ALL)
if weight_type == HypothesisTransfer.WEIGHTS_JUST_TARGET:
s += '-target'
else:
if weight_type == HypothesisTransfer.WEIGHTS_JUST_OPTIMAL:
s += '-optimal'
elif weight_type == HypothesisTransfer.WEIGHTS_JUST_FIRST:
s += '-first'
if not getattr(self, 'tune_C', False):
s += '-noC'
if getattr(self, 'use_test_error_for_model_selection', False):
s += '-TEST'
return s
class ModelSelectionTransfer(method.ModelSelectionMethod):
def __init__(self, configs=MethodConfigs()):
super(ModelSelectionTransfer, self).__init__(configs)
self.methods.append(TargetTranfer(configs))
self.methods.append(FuseTransfer(configs))
for m in self.methods:
m.base_learner = method.NadarayaWatsonMethod(configs)
@property
def prefix(self):
return 'ModelSelTransfer'
class ReweightedTransfer(method.Method):
def __init__(self, configs=MethodConfigs()):
super(ReweightedTransfer, self).__init__(configs)
self.target_kde = None
self.source_kde = None
self.kde_bandwidths = 10**np.asarray(range(-6,6),dtype='float64')
c = deepcopy(configs)
c.temp_dir = None
self.base_learner = method.NadarayaWatsonMethod(configs)
self.cv_params = {
'B': np.asarray([2, 4, 8, 16, 32])
}
self.base_learner_cv_keys = []
def train_and_test(self, data):
assert self.base_learner.can_use_instance_weights
target_data = data.get_transfer_subset(self.configs.target_labels.ravel(),include_unlabeled=False)
source_data = data.get_transfer_subset(self.configs.source_labels.ravel(), include_unlabeled=False)
is_source = data.get_transfer_inds(self.configs.source_labels.ravel())
data.type[is_source] = data_lib.TYPE_SOURCE
x_T = target_data.x
x_S = source_data.x
params = {'bandwidth': self.kde_bandwidths}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(x_T)
self.target_kde = deepcopy(grid.best_estimator_)
grid.fit(x_S)
self.source_kde = deepcopy(grid.best_estimator_)
old_cv = self.cv_params.copy()
old_base_cv = self.base_learner.cv_params.copy()
assert set(old_cv.keys()) & set(old_base_cv.keys()) == set()
self.cv_params.update(self.base_learner.cv_params)
self.base_learner_cv_keys = old_base_cv.keys()
o = super(ReweightedTransfer, self).train_and_test(data)
self.cv_params = old_cv
self.base_learner.cv_params = old_base_cv
return o
def train(self, data):
I = data.is_labeled
weights = self.get_weights(data.x)
        assert np.all(weights >= 0)
weights[weights > self.B] = self.B
data.instance_weights = weights
for key in self.base_learner_cv_keys:
setattr(self.base_learner, key, getattr(self, key))
self.base_learner.train(data)
def get_weights(self, x):
target_scores = np.exp(self.target_kde.score_samples(x))
source_scores = np.exp(self.source_kde.score_samples(x))
return target_scores / source_scores
def predict(self, data):
data.instance_weights = self.get_weights(data.x)
return self.base_learner.predict(data)
@property
def prefix(self):
s = 'CovShift'
if getattr(self.configs, 'use_validation', False):
s += '-VAL'
return s
|
Python
|
CL
|
6335db7e78adff5d04fe7d29c403a1ae2a8c6df6983cd517902bc727ab827724
|
import numpy as np
__all__ = ['svd_clean']
def svd_clean(arr, svd_num=[0], kind='ix'):
'''Clean a 2-D array using a Singular Value Decomposition.
Removes singular values according to either an index number that the user
provides, or a percentage of variance explained by the singular values.
    It then transforms data back into original space and returns an array
with the same index/columns.
Parameters
----------
arr : pd.DataFrame, n_dim==2
The array we'll compute SVD on
svd_num : list of ints, or float
If kind == 'ix', the indices of the singular values to keep.
If kind == 'perc', the cutoff percentage of variance explained,
above which we throw out singular values
kind : str, ['ix', 'perc']
See svd_num
Returns
-------
clean_all : np.array
The cleaned input array
'''
from scipy.linalg import svd
U, s, Vh = svd(arr, full_matrices=False)
if kind == 'perc':
s_scaled = s / np.sum(s)
s_cumulative = np.cumsum(s_scaled)
        s_cut = int(np.argwhere(s_cumulative > svd_num)[0])
if s_cut == 0:
s_cut += 1
svd_num = range(s_cut)
U = U[:, svd_num]
s = np.diag(s.squeeze()[svd_num])
Vh = Vh[svd_num, :]
clean_arr = np.dot(U, s).dot(Vh)
return clean_arr
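# A minimal usage sketch (illustrative values, not part of the original
# module): denoise a nearly rank-1 matrix by keeping its first singular value.
#
#   rank_one = np.outer(np.arange(1.0, 6.0), np.arange(1.0, 5.0))
#   noisy = rank_one + 0.01 * np.random.randn(5, 4)
#   cleaned = svd_clean(noisy, svd_num=[0], kind='ix')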
|
Python
|
CL
|
65931fb5c30eabb711af557728a5df0e062aac31998218b3b63cf9fe1ce11fe8
|
"""
Computes plume volume metrics from SELFE outputs.
Reads *_salt.63.nc files. Only netcdf output files are supported.
Tuomas Karna 2013-11-08
"""
import sys
import numpy as np
import datetime
import traceback
import time as timeMod
from netCDF4 import Dataset as NetCDFFile
from crane.data import meshContainer
from crane.data import dataContainer
from crane.data import timeArray
from crane.files import gr3Interface
from crane.data import ncExtract
from crane.data import dirTreeManager
class plumeStatsComputer(object):
"""Class that computes the default plume statistics from SELFE netcdf
outputs.
"""
def __init__(self, path, plumeRegionGR3File, saltThreshold):
"""Initializes data structures for the given plume region file."""
# read salt field from netcdf file
self.extractor = ncExtract.selfeExtractBase(path, var='salt')
self.extractor.initialize()
self.ncReader = self.extractor.dataFile
self.saltThreshold = saltThreshold
# load plume region
plumeRegionMC = gr3Interface.readGR3FileToMC(plumeRegionGR3File)
# check that gr3 file matches the mesh in netCDF file
if (self.ncReader.nodeX.shape != plumeRegionMC.x.shape or
not np.allclose(self.ncReader.nodeX, plumeRegionMC.x) or
self.ncReader.nodeY.shape != plumeRegionMC.y.shape or
not np.allclose(self.ncReader.nodeY, plumeRegionMC.y) or
not np.array_equal(self.ncReader.faceNodes,
plumeRegionMC.connectivity)):
raise Exception(
'Given GR3 file does not match the mesh in output files')
plume_mask = plumeRegionMC.data[:, 0, 0] == 1
# create minimesh only for plume region, save 50% cpu time
goodElems = plume_mask[self.ncReader.faceNodes].max(axis=1)
self.plume_mask = np.sort(
np.unique(
self.ncReader.faceNodes[
goodElems, :]))
self.nodeX = self.ncReader.nodeX[self.plume_mask]
self.nodeY = self.ncReader.nodeY[self.plume_mask]
# print self.nodeX.shape
self.nodesToMiniMesh = -1 * np.ones_like(
self.ncReader.nodeX, dtype=int)
# print self.nodeX.shape
self.nodesToMiniMesh[self.plume_mask] = np.arange(len(self.nodeX))
self.faceNodes = self.nodesToMiniMesh[self.ncReader.faceNodes]
self.faceNodes = self.faceNodes[goodElems]
self.areas = meshContainer.computeAreas(
self.faceNodes, self.nodeX, self.nodeY)
self.elem_center_x = self.nodeX[self.faceNodes].mean(axis=1)
self.elem_center_y = self.nodeY[self.faceNodes].mean(axis=1)
self.bath = self.ncReader.bath[self.plume_mask]
# full mesh
#areas = computeAreas(ee.faceNodes,ee.nodeX,ee.nodeY)
#elem_center_x = ee.nodeX[ee.faceNodes].mean(axis=1)
#elem_center_y =ee.nodeY[ee.faceNodes].mean(axis=1)
#bath = ee.bath
#faceNodes = ee.faceNodes
def processStack(self, stack):
"""Computes plume stats for all time steps in the netcdf file
Returns
-------
times : np.ndarray (nTime,)
time stamps in epoch format
values : np.ndarray (nTime,nStats)
plume statistics: area, centroid_x, centroid_y, volume, thickness
"""
ncfile = self.extractor.getNCFile(stack)
nTime = len(ncfile.dimensions['time'])
time = ncfile.getTime()
# area, centroid_x, centroid_y, volume, thickness
plume_stats = np.zeros((nTime, 5))
for iTime in xrange(nTime):
# minimesh
salt = ncfile.variables['salt'][iTime, :, :][
:, self.plume_mask] # (nTime,nVert,nNodes)
eta = ncfile.variables['elev'][iTime, :][self.plume_mask]
# full mesh
# salt = ncfile.variables['salt'][iTime,:,:] #(nTime,nVert,nNodes)
#eta = ncfile.variables['elev'][iTime,:]
Z, kbp2, iwet = self.ncReader.vCoords.computeVerticalCoordinates(
eta, self.bath)
salt_in_range = salt <= self.saltThreshold
# minimesh
plume_nodes = np.logical_and(salt_in_range.max(axis=0), iwet)
# full mesh
#plume_nodes = np.logical_and( salt_in_range.max(axis=0), plume_mask )
#plume_nodes = np.logical_and( plume_nodes, iwet )
plume_triangles = plume_nodes[self.faceNodes].max(axis=1)
plume_area = np.sum(self.areas[plume_triangles])
plume_x = np.sum((self.areas * self.elem_center_x)
[plume_triangles]) / plume_area
plume_y = np.sum((self.areas * self.elem_center_y)
[plume_triangles]) / plume_area
# alpha in [0,1]: fraction of vertical edge in the plume
salt_top = salt[1:, :] # top of each vertical 1d element
salt_bot = salt[:-1, :]
salt_min = np.minimum(salt_top, salt_bot)
a = (self.saltThreshold - salt_min)
b = np.abs(salt_top - salt_bot)
alpha = np.zeros_like(a)
ix = np.logical_and(b > 1e-10, ~b.mask)
alpha[ix] = a[ix] / b[ix]
alpha[alpha < 0] = 0
alpha[alpha > 1] = 1
# compute height in plume for each vertical line
z_top = Z[1:, :]
z_bot = Z[:-1, :]
plume_height_nodes = (alpha * (z_top - z_bot)).sum(axis=0)
plume_height_tri = plume_height_nodes[self.faceNodes].mean(axis=1)
            # and finally the plume volume
plume_volume = self.areas * plume_height_tri
plume_volume = plume_volume[plume_triangles].sum()
            # plume thickness
plume_thickness = plume_volume / plume_area
plume_stats[
iTime,
:] = [
plume_area,
plume_x,
plume_y,
plume_volume,
plume_thickness]
# print 'plume area',plume_area
# print 'plume centroid:',plume_x,plume_y
# print 'plume volume:',plume_volume
# print 'plume thickness:',plume_thickness
return time, plume_stats
def processStacks(self, stacks):
"""Computes plume statistics for all the given stacks
Returns
-------
times : np.ndarray (nTime,)
time stamps in epoch format
values : np.ndarray (nTime,nStats)
plume statistics: area, centroid_x, centroid_y, volume, thickness
"""
times = []
values = []
cputime0 = timeMod.clock()
sys.stdout.write(
' * Processing stacks ' + str(stacks[0]) + ' - ' + str(stacks[-1]))
sys.stdout.flush()
for i in range(len(stacks)):
try:
stack = stacks[i]
# (nTime,) (nTime,nStats)
ti, vi = self.processStack(stack)
times.append(ti)
values.append(vi)
except Exception as e:
print 'computing plume stats failed'
traceback.print_exc(file=sys.stdout)
times = np.ma.concatenate(tuple(times), axis=0)
values = np.ma.concatenate(tuple(values), axis=0)
sys.stdout.write(' duration %.2f s\n' % (timeMod.clock() - cputime0))
return times, values
def processDates(self, startTime, endTime):
"""Computes plume volume for the given date range
Returns
-------
times : np.ndarray (nTime,)
time stamps in epoch format
values : np.ndarray (nTime,nStats)
plume statistics: area, centroid_x, centroid_y, volume, thickness
"""
stacks = self.ncReader.getStacks(startTime, endTime, wholeDays=True)
return self.processStacks(stacks)
def getDataContainer(self, startTime, endTime):
"""Computes plume stats for the given time period. Returns a dataContainer
for each metric.
Returns
-------
times : np.ndarray (nTime,)
time stamps in epoch format
values : np.ndarray (nTime,nStats)
plume statistics: area, centroid_x, centroid_y, volume, thickness
"""
time, values = self.processDates(startTime, endTime)
# make dataContainer
goodIx = np.isfinite(np.sum(values, axis=1))
time = time[goodIx]
values = values[goodIx, :]
varNames = ['plume_area', 'plume_center',
'plume_volume', 'plume_thickness']
fieldIndices = [[0], [1, 2], [3], [4]]
fieldNames = {'plume_center': ['plume_center_x', 'plume_center_y']}
dcs = []
for i, var in enumerate(varNames):
sthSuffix = '_{0:d}'.format(int(self.saltThreshold))
data = np.swapaxes(values[:, fieldIndices[i]], 0, 1)[
None, :, :] # (1,nStats,nTime)
ta = timeArray.timeArray(time, 'epoch')
meta = {}
meta['location'] = 'plume'
meta['instrument'] = 'model'
meta['variable'] = var + sthSuffix
meta['dataType'] = 'plumemetrics'
meta['saltThreshold'] = str(self.saltThreshold)
x = y = z = 0
fNames = [fn + sthSuffix for fn in fieldNames.get(var, [var])]
dc = dataContainer.dataContainer('', ta, x, y, z, data, fNames,
coordSys='spcs', metaData=meta)
dcs.append(dc)
return dcs
#-------------------------------------------------------------------------
# Main: Commandline interface
#-------------------------------------------------------------------------
def parseCommandLine():
from optparse import OptionParser
parser = OptionParser()
parser.add_option(
'-r',
'--runTag',
action='store',
type='string',
dest='runTag',
help='Run tag, used as a label in post-proc.')
parser.add_option(
'-d',
'--dataDirectory',
action='store',
type='string',
dest='dataDir',
help='directory where model outputs are stored')
parser.add_option('-s', '--start', action='store', type='string',
dest='startStr', help='Date to start processing')
parser.add_option('-e', '--end', action='store', type='string',
dest='endStr', help='Date to end processing')
parser.add_option(
'-v',
'--saltThreshold',
action='store',
type='float',
default=28.0,
dest='saltThreshold',
help='salinity value that defines the plume, S<=threshold (default %default)')
parser.add_option(
'-t',
'--plumeRegionFile',
action='store',
type='string',
dest='plumeRegionGR3File',
help='a gr3 file indicating the plume region (depth==1)',
default=None)
(options, args) = parser.parse_args()
runTag = options.runTag
startStr = options.startStr
endStr = options.endStr
dataDir = options.dataDir
plumeRegionGR3File = options.plumeRegionGR3File
saltThreshold = options.saltThreshold
if not dataDir:
parser.print_help()
parser.error('dataDir undefined')
if not startStr:
parser.print_help()
parser.error('startStr undefined')
if not endStr:
parser.print_help()
parser.error('endStr undefined')
if not runTag:
parser.print_help()
parser.error('runTag undefined')
if not plumeRegionGR3File:
parser.print_help()
parser.error('plume region file undefined')
startTime = datetime.datetime.strptime(startStr, '%Y-%m-%d')
endTime = datetime.datetime.strptime(endStr, '%Y-%m-%d')
print 'Parsed options:'
print ' - time range:', str(startTime), '->', str(endTime)
print ' - salinity threshold:', saltThreshold
print ' - dataDir', dataDir
print ' - runTag', runTag
print ' - plume region file', plumeRegionGR3File
# Extract
psc = plumeStatsComputer(dataDir, plumeRegionGR3File, saltThreshold)
dcs = psc.getDataContainer(startTime, endTime)
for dc in dcs:
dc.setMetaData('tag', runTag)
rule = 'monthlyFile'
dirTreeManager.saveDataContainerInTree(dcs, rule=rule, dtype=np.float32,
overwrite=True)
if __name__ == '__main__':
parseCommandLine()
|
Python
|
CL
|
1bed8fbe0525bb625e77935b2f464473fa00d2c53eeb2df003964a6ec1f45086
|
import numpy as np
import tensorflow as tf
import os
import sys
sys.path.append("..")
import yaml
import random
from tensorflow.contrib import learn
from model.adversarial_model import Adversarial_Network
from model.transfer_model import Transfer
from utils.data_loader import load_data, batch_iter
from itertools import chain
with open("../config/config.yaml", "rb") as f:
params = yaml.safe_load(f)
class EVAL(object):
"""
A trial run without adversarial training: build the graph and inspect trainable variables per scope
"""
def __init__(self, sequence_length):
# load data first
self.processor = learn.preprocessing.VocabularyProcessor.restore(
"../temp/vocab")
self.processor.max_document_length = sequence_length
raw_data, raw_label = load_data("test")
self.train_data = []
self.train_label = []
for rd, rl in zip(raw_data, raw_label):
# for each task in data
tmp_data = []
tmp_label = []
rd = list(self.processor.transform(rd)) # generator -> list
for tmp_x, tmp_y in zip(rd, rl):
tmp_x = tmp_x.tolist()
if np.sum(tmp_x) != 0:
tmp_data.append(tmp_x)
tmp_label.append(tmp_y)
self.train_data.append(tmp_data)
self.train_label.append(tmp_label)
del raw_data, raw_label
print("load training data complete!")
def process(self, learning_rate, batch_size, epochs, evaluate_every):
"""
"""
graph = tf.Graph()
with graph.as_default():
instance = Adversarial_Network(
sequence_length=params["global"]["sequence_length"],
num_classes=params["global"]["num_classes"],
embedding_size=params["global"]["embedding_size"],
vocab_size=len(
self.processor.vocabulary_),
embedding_matrix=None,
static=params["global"]["static"],
rnn_hidden_size=params["global"]["rnn_hidden_size"],
shared_num_layers=params["shared_model"]["num_layers"],
private_num_layers=params["private_model"]["num_layers"],
dynamic=params["global"]["dynamic"],
use_attention=params["global"]["use_attention"],
attention_size=params["global"]["attention_size"],
mlp_hidden_size=params["global"]["mlp_hidden_size"])
global_step = tf.Variable(0, trainable=False)
init = tf.global_variables_initializer()
# discriminator_optimizer = tf.train.AdamOptimizer(learning_rate)
# task_optimizer = tf.train.GradientDescentOptimizer(learning_rate)
# shared_optimizer = tf.train.AdamOptimizer(learning_rate)
# advloss = instance.adv_loss
taskloss = instance.task_loss
# discriminator_vars = tf.get_collection(
# tf.GraphKeys.TRAINABLE_VARIABLES, scope="discriminator") # OK
# print(discriminator_vars)
embedding_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="W")
print(embedding_vars) # extract the params in embedding layer succeed
shared_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="shared")
print(shared_vars)
apparel_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="private-apparel")
print(apparel_vars)
books_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="private-books")
print(books_vars)
fc_vars = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, scope="fully-connected-layer")
print(fc_vars) # extract the params in fully-connected layer succeed
print("this is scope in instance")
print(instance.scope)
with tf.Session() as sess:
sess.run(init)
# dvs = sess.run(discriminator_vars)
# print(dvs)
# svs = sess.run(shared_vars)
# print(svs)
# avs = sess.run(apparel_vars)
# print(avs)
# bvs = sess.run(books_vars)
# print(bvs)
# tvars = tf.trainable_variables()
# print(tvars)
# rnn_w = instance.rnn_model.cell_fw.trainable_variables
rnn_vars = instance.rnn_model.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
print(rnn_vars)
for task, batch in batch_iter(self.train_data, self.train_label, batch_size, epochs, shuffle=False):
"""
TODO
need to test saver and restore function
and extract variables according to the specific scope or name
"""
x, y = zip(*batch)
tl, svs = sess.run([taskloss, shared_vars], feed_dict={instance.task: task, instance.input_x: x, instance.input_y: y})
# print(al)
print(tl)
print(svs)
break
if __name__ == "__main__":
# https://stackoverflow.com/questions/45263666/tensorflow-variable-reuse
# https://sthsf.github.io/2017/06/18/ValueError:%20kernel%20already%20exists/index.html
# https://stackoverflow.com/questions/35013080/tensorflow-how-to-get-all-variables-from-rnn-cell-basiclstm-rnn-cell-multirnn
# The RNN forward pass must be defined explicitly inside a variable_scope for this to work
# For now we only need to know the variable_scope of the shared and discriminator parts
# Open question: with two tensorflow models, how do we fetch the distinct scopes of each?
eval = EVAL(params["global"]["sequence_length"])
eval.process(
learning_rate=params["global"]["learning_rate"],
batch_size=params["global"]["batch_size"],
epochs=params["global"]["epochs"],
evaluate_every=100
)
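# A minimal, self-contained sketch (hypothetical scope names) of the
# scope-filtered variable collection pattern probed above; it is not part of
# the original experiment.
def _scope_collection_sketch():
    g = tf.Graph()
    with g.as_default():
        with tf.variable_scope("shared"):
            tf.get_variable("w", shape=[2, 2])
        with tf.variable_scope("private-books"):
            tf.get_variable("w", shape=[2, 2])
        shared = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="shared")
        private = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="private-books")
        assert len(shared) == 1 and len(private) == 1
        return shared, private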
|
Python
|
CL
|
62c702e598278eb57108fdda7c6cbafaee9edbf7de0cdd5e35abf693084ddf53
|
# QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from AlgorithmImports import *
### <summary>
### Base regression algorithm exercising different style options with option price models that might
### or might not support them. Also, if the option style is supported, greeks are asserted to be accessible and have valid values.
### </summary>
class OptionPriceModelForOptionStylesBaseRegressionAlgorithm(QCAlgorithm):
def __init__(self):
super().__init__()
self._optionStyleIsSupported = False
self._checkGreeks = True
self._triedGreeksCalculation = False
self._option = None
def OnData(self, slice):
if self.IsWarmingUp: return
for kvp in slice.OptionChains:
if self._option is None or kvp.Key != self._option.Symbol: continue
self.CheckGreeks([contract for contract in kvp.Value])
def OnEndOfDay(self, symbol):
self._checkGreeks = True
def OnEndOfAlgorithm(self):
if not self._triedGreeksCalculation:
raise Exception("Expected greeks to be accessed")
def Init(self, option, optionStyleIsSupported):
self._option = option
self._optionStyleIsSupported = optionStyleIsSupported
self._checkGreeks = True
self._triedGreeksCalculation = False
def CheckGreeks(self, contracts):
if not self._checkGreeks or len(contracts) == 0: return
self._checkGreeks = False
self._triedGreeksCalculation = True
for contract in contracts:
greeks = Greeks()
# Compute the style string before the try so the except branch can use it too
optionStyleStr = 'American' if self._option.Style == OptionStyle.American else 'European'
try:
greeks = contract.Greeks
# Greeks should not have been successfully accessed if the option style is not supported
if not self._optionStyleIsSupported:
raise Exception(f'Expected greeks not to be calculated for {contract.Symbol.Value}, an {optionStyleStr} style option, using {type(self._option.PriceModel).__name__}, which does not support them, but they were')
except ArgumentException:
# ArgumentException is only expected if the option style is not supported
if self._optionStyleIsSupported:
raise Exception(f'Expected greeks to be calculated for {contract.Symbol.Value}, an {optionStyleStr} style option, using {type(self._option.PriceModel).__name__}, which supports them, but they were not')
# Greeks should be valid if they were successfully accessed for supported option style
# Delta can be {-1, 0, 1} if the price is too wild, rho can be 0 if risk free rate is 0
# Vega can be 0 if the price is very off from theoretical price, Gamma = 0 if Delta belongs to {-1, 1}
if (self._optionStyleIsSupported
and ((contract.Right == OptionRight.Call and (greeks.Delta < 0.0 or greeks.Delta > 1.0 or greeks.Rho < 0.0))
or (contract.Right == OptionRight.Put and (greeks.Delta < -1.0 or greeks.Delta > 0.0 or greeks.Rho > 0.0))
or greeks.Theta == 0.0 or greeks.Vega < 0.0 or greeks.Gamma < 0.0)):
raise Exception(f'Expected greeks to have valid values. Greeks were: Delta: {greeks.Delta}, Rho: {greeks.Rho}, Theta: {greeks.Theta}, Vega: {greeks.Vega}, Gamma: {greeks.Gamma}')
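# A hypothetical concrete subclass sketch (the symbol, dates, and price model
# are illustrative, not taken from the regression suite) showing how Init()
# wires an option into this base class.
class AmericanOptionSketchAlgorithm(OptionPriceModelForOptionStylesBaseRegressionAlgorithm):
    def Initialize(self):
        self.SetStartDate(2015, 12, 24)
        self.SetEndDate(2015, 12, 24)
        option = self.AddOption("GOOG")
        option.PriceModel = OptionPriceModels.CrankNicolsonFD()
        self.Init(option, optionStyleIsSupported=True)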
|
Python
|
CL
|
763b71425d98f7fc2f8970ee2b5f0a7c617c9969e9de69254576e1979f5ba03a
|
from .craigstrategy import CRAIGStrategy
from .dataselectionstrategy import DataSelectionStrategy
from .glisterstrategy import GLISTERStrategy
from .randomstrategy import RandomStrategy
from .submodularselectionstrategy import SubmodularSelectionStrategy
from .gradmatchstrategy import GradMatchStrategy
from .fixedweightstrategy import FixedWeightStrategy
from .selconstrategy import SELCONstrategy
from .adapweightsstrategy import AdapWeightsStrategy
from .stochasticgreedyexplorationstrategy import StochasticGreedyExplorationStrategy
from .weightedrandomexplorationstrategy import WeightedRandomExplorationStrategy
|
Python
|
CL
|
255a292cb1b4c5214a03d269b7eadc4b56b924acd1be5240ba8c0d8174ec0805
|
"""
Integration Test for querying multiple symbols
"""
import os
import numpy as np
import pandas as pd
import pymarketstore as pymkts
import pytest
client = pymkts.Client(f"http://127.0.0.1:{os.getenv('MARKETSTORE_PORT', 5993)}/rpc",
grpc=(os.getenv("USE_GRPC", "false") == "true"))
@pytest.mark.parametrize('write_data, query_columns, want_err, want_data', [
# even if the data format of 2 symbols are different,
# querying succeeds if the common columns are specified
({
'TEST1': {'data': [(pd.Timestamp('2017-01-01 00:00').value / 10 ** 9, 10.0)],
'dtype': [('Epoch', 'i8'), ('Ask', 'f4')],
'is_variable_length': False
},
'TEST2': {'data': [(pd.Timestamp('2017-01-01 00:00').value / 10 ** 9, 20.0, 30.0)],
'dtype': [('Epoch', 'i8'), ('Ask', 'f4'), ('Bid', 'f4')],
'is_variable_length': False
},
},
# query_columns
['Ask'],
# want_err
False,
# want_data
{'TEST1': pd.DataFrame(data={'Ask': np.array([10.0], dtype='float32')},
index=pd.Series(['2017-01-01T00:00:00+00:00'],
dtype='datetime64[ns, UTC]', name="Epoch")),
'TEST2': pd.DataFrame(data={'Ask': np.array([20.0], dtype='float32')},
index=pd.Series(['2017-01-01T00:00:00+00:00'],
dtype='datetime64[ns, UTC]', name="Epoch"))
}
),
# if common columns are not specified, query returns an error.
({
'TEST1': {'data': [(pd.Timestamp('2017-01-01 00:00').value / 10 ** 9, 10.0)],
'dtype': [('Epoch', 'i8'), ('Ask', 'f4')],
'is_variable_length': False
},
'TEST2': {'data': [(pd.Timestamp('2017-01-01 00:00').value / 10 ** 9, 20.0, 30.0)],
'dtype': [('Epoch', 'i8'), ('Ask', 'f4'), ('Bid', 'f4')],
'is_variable_length': False
},
},
# query_columns
None, # no columns specified
# want_err
True,
# want_data
None,
),
])
def test_query_multi_symbols(write_data, query_columns, want_err, want_data):
# ---- given ----
for symbol in write_data:
tbk = "{}/1Sec/TICK".format(symbol)
client.destroy(tbk) # setup
client.write(np.array(write_data[symbol]['data'], dtype=write_data[symbol]['dtype']), tbk,
isvariablelength=write_data[symbol]['is_variable_length'])
# ---- when ----
symbols = list(write_data.keys())
if want_err:
with pytest.raises(Exception) as excinfo:
client.query(pymkts.Params(symbols, '1Sec', 'TICK', columns=query_columns))
assert "symbols in a query must have the same data type or be filtered" in str(excinfo.value)
return
reply = client.query(pymkts.Params(symbols, '1Sec', 'TICK', columns=query_columns))
# ---- then ----
tbks = ["{}/1Sec/TICK".format(symbol) for symbol in symbols]
for i in range(len(tbks)):
want = want_data[symbols[i]]
got = reply.all()[tbks[i]].df()
assert got.equals(want)
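# Standalone usage sketch (the "DEMO" symbol is illustrative): write a single
# record and read it back, mirroring the fixture setup above.
def example_roundtrip():
    tbk = "DEMO/1Sec/TICK"
    client.destroy(tbk)
    data = np.array([(pd.Timestamp('2017-01-01 00:00').value / 10 ** 9, 1.0)],
                    dtype=[('Epoch', 'i8'), ('Ask', 'f4')])
    client.write(data, tbk, isvariablelength=False)
    reply = client.query(pymkts.Params('DEMO', '1Sec', 'TICK'))
    return reply.first().df()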
|
Python
|
CL
|
1a86ff081b27aa250275e6c182bb7e7771a10e13f23b7eb7363441ac2d63cbc3
|
#! /usr/bin/env python3
# ReScience yaml to latex converter
# Released under the BSD two-clauses licence
def generate_latex_metadata(filename, article):
abstract = article.abstract.replace("&", "\\&")
content = (
"% DO NOT EDIT - automatically generated from {filename}\n\n"
"\\def \\codeURL{{{_.code.url}}}\n"
"\\def \\codeDOI{{{_.code.doi}}}\n"
"\\def \\codeSWH{{{_.code.swh}}}\n"
"\\def \\dataURL{{{_.data.url}}}\n"
"\\def \\dataDOI{{{_.data.doi}}}\n"
"\\def \\editorNAME{{{_.editors[0].name}}}\n"
"\\def \\editorORCID{{{_.editors[0].orcid}}}\n"
"\\def \\reviewerINAME{{{_.reviewers[0].name}}}\n"
"\\def \\reviewerIORCID{{{_.reviewers[0].orcid}}}\n"
"\\def \\reviewerIINAME{{{_.reviewers[1].name}}}\n"
"\\def \\reviewerIIORCID{{{_.reviewers[1].orcid}}}\n"
"\\def \\dateRECEIVED{{{_.date_received}}}\n"
"\\def \\dateACCEPTED{{{_.date_accepted}}}\n"
"\\def \\datePUBLISHED{{{_.date_published}}}\n"
"\\def \\articleTITLE{{{_.title}}}\n"
"\\def \\articleTYPE{{{_.type}}}\n"
"\\def \\articleDOMAIN{{{_.domain}}}\n"
"\\def \\articleBIBLIOGRAPHY{{{_.bibliography}}}\n"
"\\def \\articleYEAR{{{_.date_published.year}}}\n"
"\\def \\reviewURL{{{_.review.url}}}\n"
# "\\def \\articleABSTRACT{{{_.abstract}}}\n"
"\\def \\articleABSTRACT{{{abstract}}}\n"
"\\def \\replicationCITE{{{_.replication.cite}}}\n"
"\\def \\replicationBIB{{{_.replication.bib}}}\n"
"\\def \\replicationURL{{{_.replication.url}}}\n"
"\\def \\replicationDOI{{{_.replication.doi}}}\n"
"\\def \\contactNAME{{{_.contact.name}}}\n"
"\\def \\contactEMAIL{{{_.contact.email}}}\n"
"\\def \\articleKEYWORDS{{{_.keywords}}}\n"
"\\def \\journalNAME{{{_.journal_name}}}\n"
"\\def \\journalVOLUME{{{_.journal_volume}}}\n"
"\\def \\journalISSUE{{{_.journal_issue}}}\n"
"\\def \\articleNUMBER{{{_.article_number}}}\n"
"\\def \\articleDOI{{{_.article_doi}}}\n"
"\\def \\authorsFULL{{{_.authors_full}}}\n"
"\\def \\authorsABBRV{{{_.authors_abbrv}}}\n"
"\\def \\authorsSHORT{{{_.authors_short}}}\n"
"\\title{{\\articleTITLE}}\n"
"\\date{{}}\n"
"".format(filename=filename, _=article, abstract=abstract))
for author in article.authors:
affiliations = ",".join(author.affiliations)
if len(author.orcid) > 0:
affiliations += ",\\orcid{%s}" % author.orcid
content += "\\author[%s]{%s}\n" % (affiliations, author.name)
for a in article.affiliations:
if len(a.address) > 0:
content += "\\affil[{_.code}]{{{_.name}, {_.address}}}\n".format(_=a)
else:
content += "\\affil[{_.code}]{{{_.name}}}\n".format(_=a)
return content
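# A minimal sketch of the "{_.attr}" str.format trick used above, with a
# SimpleNamespace standing in for an Article instance (hypothetical URL).
def _format_trick_example():
    from types import SimpleNamespace
    ns = SimpleNamespace(code=SimpleNamespace(url="https://example.org/repo"))
    # Doubled braces emit literal braces; "{_.code.url}" does attribute lookup.
    return "\\def \\codeURL{{{_.code.url}}}\n".format(_=ns)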
# -----------------------------------------------------------------------------
if __name__ == '__main__':
import locale
import argparse
from article import Article
# Set a UTF-8 locale - any non-ASCII characters in metadata.yaml should be UTF-8 encoded
locale.setlocale(locale.LC_ALL,'en_US.UTF-8')
parser = argparse.ArgumentParser(description='YAML to latex converter.')
parser.add_argument('--input', '-i', dest='filename_in', action='store',
default="metadata.yaml", help='input YAML file')
parser.add_argument('--output', "-o", dest='filename_out', action='store',
default="article-metadata.tex", help='output latex file')
args = parser.parse_args()
filename_in = args.filename_in
filename_out = args.filename_out
# print("Generating latex definitions ({1}) from {0}".format(filename_in, filename_out))
with open(filename_in, "r") as file:
article = Article(file.read())
if len(article.authors) > 0:
content = generate_latex_metadata(filename_in, article)
if filename_out is not None:
with open(filename_out, "w") as file:
file.write(content)
else:
print(content)
else:
print("Error! No author found.")
|
Python
|
CL
|
750b15f68ab4bd4c3a2287ab75232edf024ff0ae24e1a713345f156309b38459
|
# Copyright 2017 Inspur Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Tests for the Inspur InStorage volume driver."""
import re
from oslo_concurrency import processutils
from oslo_utils import units
import six
from cinder import exception
from cinder import utils
from cinder.volume.drivers.inspur.instorage import instorage_const
from cinder.volume.drivers.inspur.instorage import instorage_fc
from cinder.volume.drivers.inspur.instorage import instorage_iscsi
MCS_POOLS = ['openstack', 'openstack1']
def get_test_pool(get_all=False):
if get_all:
return MCS_POOLS
else:
return MCS_POOLS[0]
class FakeInStorageMCSFcDriver(instorage_fc.InStorageMCSFCDriver):
def __init__(self, *args, **kwargs):
super(FakeInStorageMCSFcDriver, self).__init__(*args, **kwargs)
def set_fake_storage(self, fake):
self.fake_storage = fake
def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
utils.check_ssh_injection(cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
return ret
class FakeInStorageMCSISCSIDriver(instorage_iscsi.InStorageMCSISCSIDriver):
def __init__(self, *args, **kwargs):
super(FakeInStorageMCSISCSIDriver, self).__init__(*args, **kwargs)
def set_fake_storage(self, fake):
self.fake_storage = fake
def _run_ssh(self, cmd, check_exit_code=True, attempts=1):
utils.check_ssh_injection(cmd)
ret = self.fake_storage.execute_command(cmd, check_exit_code)
return ret
class FakeInStorage(object):
def __init__(self, pool_name):
self._flags = {'instorage_mcs_volpool_name': pool_name}
self._volumes_list = {}
self._hosts_list = {}
self._mappings_list = {}
self._lcmappings_list = {}
self._lcconsistgrp_list = {}
self._rcrelationship_list = {}
self._partnership_list = {}
self._partnershipcandidate_list = {}
self._system_list = {'instorage-mcs-sim':
{'id': '0123456789ABCDEF',
'name': 'instorage-mcs-sim'},
'aux-mcs-sim': {'id': 'ABCDEF0123456789',
'name': 'aux-mcs-sim'}}
self._other_pools = {'openstack2': {}, 'openstack3': {}}
self._next_cmd_error = {
'lsportip': '',
'lsfabric': '',
'lsiscsiauth': '',
'lsnodecanister': '',
'mkvdisk': '',
'lsvdisk': '',
'lslcmap': '',
'prestartlcmap': '',
'startlcmap': '',
'rmlcmap': '',
'lslicense': '',
'lsguicapabilities': '',
'lshost': '',
'lsrcrelationship': ''
}
self._errors = {
'CMMVC5701E': ('', 'CMMVC5701E No object ID was specified.'),
'CMMVC6035E': ('', 'CMMVC6035E The action failed as the '
'object already exists.'),
'CMMVC5753E': ('', 'CMMVC5753E The specified object does not '
'exist or is not a suitable candidate.'),
'CMMVC5707E': ('', 'CMMVC5707E Required parameters are missing.'),
'CMMVC6581E': ('', 'CMMVC6581E The command has failed because '
'the maximum number of allowed iSCSI '
'qualified names (IQNs) has been reached, '
'or the IQN is already assigned or is not '
'valid.'),
'CMMVC5754E': ('', 'CMMVC5754E The specified object does not '
'exist, or the name supplied does not meet '
'the naming rules.'),
'CMMVC6071E': ('', 'CMMVC6071E The VDisk-to-host mapping was '
'not created because the VDisk is already '
'mapped to a host.'),
'CMMVC5879E': ('', 'CMMVC5879E The VDisk-to-host mapping was '
'not created because a VDisk is already '
'mapped to this host with this SCSI LUN.'),
'CMMVC5840E': ('', 'CMMVC5840E The virtual disk (VDisk) was '
'not deleted because it is mapped to a '
'host or because it is part of a LocalCopy '
'or Remote Copy mapping, or is involved in '
'an image mode migrate.'),
'CMMVC6527E': ('', 'CMMVC6527E The name that you have entered '
'is not valid. The name can contain letters, '
'numbers, spaces, periods, dashes, and '
'underscores. The name must begin with a '
'letter or an underscore. The name must not '
'begin or end with a space.'),
'CMMVC5871E': ('', 'CMMVC5871E The action failed because one or '
'more of the configured port names is in a '
'mapping.'),
'CMMVC5924E': ('', 'CMMVC5924E The LocalCopy mapping was not '
'created because the source and target '
'virtual disks (VDisks) are different sizes.'),
'CMMVC6303E': ('', 'CMMVC6303E The create failed because the '
'source and target VDisks are the same.'),
'CMMVC7050E': ('', 'CMMVC7050E The command failed because at '
'least one node in the I/O group does not '
'support compressed VDisks.'),
'CMMVC6430E': ('', 'CMMVC6430E The command failed because the '
'target and source managed disk groups must '
'be different.'),
'CMMVC6353E': ('', 'CMMVC6353E The command failed because the '
'copy specified does not exist.'),
'CMMVC6446E': ('', 'The command failed because the managed disk '
'groups have different extent sizes.'),
# Catch-all for invalid state transitions:
'CMMVC5903E': ('', 'CMMVC5903E The LocalCopy mapping was not '
'changed because the mapping or consistency '
'group is another state.'),
'CMMVC5709E': ('', 'CMMVC5709E [-%(VALUE)s] is not a supported '
'parameter.'),
'CMMVC5982E': ('', 'CMMVC5982E The operation was not performed '
'because it is not valid given the current '
'relationship state.'),
'CMMVC5963E': ('', 'CMMVC5963E No direction has been defined.'),
}
self._lc_transitions = {'begin': {'make': 'idle_or_copied'},
'idle_or_copied': {'prepare': 'preparing',
'delete': 'end',
'delete_force': 'end'},
'preparing': {'flush_failed': 'stopped',
'wait': 'prepared'},
'end': None,
'stopped': {'prepare': 'preparing',
'delete_force': 'end'},
'prepared': {'stop': 'stopped',
'start': 'copying'},
'copying': {'wait': 'idle_or_copied',
'stop': 'stopping'},
# Assume the worst case where stopping->stopped
# rather than stopping->idle_or_copied
'stopping': {'wait': 'stopped'},
}
self._lc_cg_transitions = {'begin': {'make': 'empty'},
'empty': {'add': 'idle_or_copied'},
'idle_or_copied': {'prepare': 'preparing',
'delete': 'end',
'delete_force': 'end'},
'preparing': {'flush_failed': 'stopped',
'wait': 'prepared'},
'end': None,
'stopped': {'prepare': 'preparing',
'delete_force': 'end'},
'prepared': {'stop': 'stopped',
'start': 'copying',
'delete_force': 'end',
'delete': 'end'},
'copying': {'wait': 'idle_or_copied',
'stop': 'stopping',
'delete_force': 'end',
'delete': 'end'},
# Assume the case where stopping->stopped
# rather than stopping->idle_or_copied
'stopping': {'wait': 'stopped'},
}
self._rc_transitions = {'inconsistent_stopped':
{'start': 'inconsistent_copying',
'stop': 'inconsistent_stopped',
'delete': 'end',
'delete_force': 'end'},
'inconsistent_copying': {
'wait': 'consistent_synchronized',
'start': 'inconsistent_copying',
'stop': 'inconsistent_stopped',
'delete': 'end',
'delete_force': 'end'},
'consistent_synchronized': {
'start': 'consistent_synchronized',
'stop': 'consistent_stopped',
'stop_access': 'idling',
'delete': 'end',
'delete_force': 'end'},
'consistent_stopped':
{'start': 'consistent_synchronized',
'stop': 'consistent_stopped',
'delete': 'end',
'delete_force': 'end'},
'end': None,
'idling': {
'start': 'inconsistent_copying',
'stop': 'inconsistent_stopped',
'stop_access': 'idling',
'delete': 'end',
'delete_force': 'end'},
}
def _state_transition(self, function, lcmap):
if (function == 'wait' and
'wait' not in self._lc_transitions[lcmap['status']]):
return ('', '')
if lcmap['status'] == 'copying' and function == 'wait':
if lcmap['copyrate'] != '0':
if lcmap['progress'] == '0':
lcmap['progress'] = '50'
else:
lcmap['progress'] = '100'
lcmap['status'] = 'idle_or_copied'
return ('', '')
else:
try:
curr_state = lcmap['status']
lcmap['status'] = self._lc_transitions[curr_state][function]
return ('', '')
except Exception:
return self._errors['CMMVC5903E']
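# For example, a mapping whose status is 'prepared' moves to 'copying' on
# 'start', per self._lc_transitions above.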
def _lc_cg_state_transition(self, function, lc_consistgrp):
if (function == 'wait' and
'wait' not in self._lc_cg_transitions[lc_consistgrp['status']]):
return ('', '')
try:
curr_state = lc_consistgrp['status']
new_state = self._lc_cg_transitions[curr_state][function]
lc_consistgrp['status'] = new_state
return ('', '')
except Exception:
return self._errors['CMMVC5903E']
# Find an unused ID
@staticmethod
def _find_unused_id(d):
ids = []
for v in d.values():
ids.append(int(v['id']))
ids.sort()
for index, n in enumerate(ids):
if n > index:
return six.text_type(index)
return six.text_type(len(ids))
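# e.g. existing ids {0, 1, 3} -> returns '2', the first unused slot.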
# Check if name is valid
@staticmethod
def _is_invalid_name(name):
if re.match(r'^[a-zA-Z_][\w._-]*$', name):
return False
return True
# Convert argument string to dictionary
@staticmethod
def _cmd_to_dict(arg_list):
no_param_args = [
'autodelete',
'bytes',
'compressed',
'force',
'nohdr',
'nofmtdisk',
'async',
'access',
'start'
]
one_param_args = [
'chapsecret',
'cleanrate',
'copy',
'copyrate',
'delim',
'intier',
'filtervalue',
'grainsize',
'hbawwpn',
'host',
'iogrp',
'iscsiname',
'mdiskgrp',
'name',
'rsize',
'scsi',
'size',
'source',
'target',
'unit',
'vdisk',
'warning',
'wwpn',
'primary',
'consistgrp',
'master',
'aux',
'cluster',
'linkbandwidthmbits',
'backgroundcopyrate'
]
no_or_one_param_args = [
'autoexpand',
]
# Handle the special case of lsnode which is a two-word command
# Use the one word version of the command internally
if arg_list[0] in ('mcsinq', 'mcsop'):
if arg_list[1] == 'lsnode':
if len(arg_list) > 4: # e.g. mcsinq lsnode -delim ! <node id>
ret = {'cmd': 'lsnode', 'node_id': arg_list[-1]}
else:
ret = {'cmd': 'lsnodecanister'}
else:
ret = {'cmd': arg_list[1]}
arg_list.pop(0)
else:
ret = {'cmd': arg_list[0]}
skip = False
for i in range(1, len(arg_list)):
if skip:
skip = False
continue
# Check for a quoted command argument for volumes and strip
# quotes so that the simulator can match it later. Just
# match against test naming conventions for now.
if arg_list[i][0] == '"' and ('volume' in arg_list[i] or
'snapshot' in arg_list[i]):
arg_list[i] = arg_list[i][1:-1]
if arg_list[i][0] == '-':
if arg_list[i][1:] in no_param_args:
ret[arg_list[i][1:]] = True
elif arg_list[i][1:] in one_param_args:
ret[arg_list[i][1:]] = arg_list[i + 1]
skip = True
elif arg_list[i][1:] in no_or_one_param_args:
if i == (len(arg_list) - 1) or arg_list[i + 1][0] == '-':
ret[arg_list[i][1:]] = True
else:
ret[arg_list[i][1:]] = arg_list[i + 1]
skip = True
else:
raise exception.InvalidInput(
reason='unrecognized argument %s' % arg_list[i])
else:
ret['obj'] = arg_list[i]
return ret
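# Example (hypothetical input): ['mcsop', 'rmvdisk', '-force', 'volume1']
# parses to {'cmd': 'rmvdisk', 'force': True, 'obj': 'volume1'}.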
@staticmethod
def _print_info_cmd(rows, delim=' ', nohdr=False, **kwargs):
"""Generic function for printing information."""
if nohdr:
del rows[0]
for index in range(len(rows)):
rows[index] = delim.join(rows[index])
return ('%s' % '\n'.join(rows), '')
@staticmethod
def _print_info_obj_cmd(header, row, delim=' ', nohdr=False):
"""Generic function for printing information for a specific object."""
objrows = []
for idx, val in enumerate(header):
objrows.append([val, row[idx]])
if nohdr:
for index in range(len(objrows)):
objrows[index] = ' '.join(objrows[index][1:])
for index in range(len(objrows)):
objrows[index] = delim.join(objrows[index])
return ('%s' % '\n'.join(objrows), '')
@staticmethod
def _convert_bytes_units(bytestr):
num = int(bytestr)
unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit_index = 0
while num > 1024:
num = num / 1024
unit_index += 1
return '%d%s' % (num, unit_array[unit_index])
@staticmethod
def _convert_units_bytes(num, unit):
unit_array = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
unit_index = 0
while unit.lower() != unit_array[unit_index].lower():
num = num * 1024
unit_index += 1
return six.text_type(num)
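# e.g. _convert_bytes_units('2097152') -> '2MB' and
# _convert_units_bytes(1, 'GB') -> '1073741824'.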
def _cmd_lslicense(self, **kwargs):
rows = [None] * 3
rows[0] = ['used_compression_capacity', '0.08']
rows[1] = ['license_compression_capacity', '0']
if self._next_cmd_error['lslicense'] == 'no_compression':
self._next_cmd_error['lslicense'] = ''
rows[2] = ['license_compression_enclosures', '0']
else:
rows[2] = ['license_compression_enclosures', '1']
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lsguicapabilities(self, **kwargs):
rows = [None] * 2
if self._next_cmd_error['lsguicapabilities'] == 'no_compression':
self._next_cmd_error['lsguicapabilities'] = ''
rows[0] = ['license_scheme', '0']
else:
rows[0] = ['license_scheme', '1813']
rows[1] = ['product_key', instorage_const.DEV_MODEL_INSTORAGE]
return self._print_info_cmd(rows=rows, **kwargs)
# Print mostly made-up stuff in the correct syntax
def _cmd_lssystem(self, **kwargs):
rows = [None] * 3
rows[0] = ['id', '0123456789ABCDEF']
rows[1] = ['name', 'instorage-mcs-sim']
rows[2] = ['code_level', '3.1.1.0 (build 87.0.1311291000)']
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lssystem_aux(self, **kwargs):
rows = [None] * 3
rows[0] = ['id', 'ABCDEF0123456789']
rows[1] = ['name', 'aux-mcs-sim']
rows[2] = ['code_level', '3.1.1.0 (build 87.0.1311291000)']
return self._print_info_cmd(rows=rows, **kwargs)
# Print mostly made-up stuff in the correct syntax, assume -bytes passed
def _cmd_lsmdiskgrp(self, **kwargs):
pool_num = len(self._flags['instorage_mcs_volpool_name'])
rows = []
rows.append(['id', 'name', 'status', 'mdisk_count',
'vdisk_count', 'capacity', 'extent_size',
'free_capacity', 'virtual_capacity', 'used_capacity',
'real_capacity', 'overallocation', 'warning',
'in_tier', 'in_tier_status'])
for i in range(pool_num):
row_data = [str(i + 1),
self._flags['instorage_mcs_volpool_name'][i], 'online',
'1', six.text_type(len(self._volumes_list)),
'3573412790272', '256', '3529926246400',
'1693247906775',
'26843545600', '38203734097', '47', '80', 'auto',
'inactive']
rows.append(row_data)
rows.append([str(pool_num + 1), 'openstack2', 'online',
'1', '0', '3573412790272', '256',
'3529432325160', '1693247906775', '26843545600',
'38203734097', '47', '80', 'auto', 'inactive'])
rows.append([str(pool_num + 2), 'openstack3', 'online',
'1', '0', '3573412790272', '128',
'3529432325160', '1693247906775', '26843545600',
'38203734097', '47', '80', 'auto', 'inactive'])
if 'obj' not in kwargs:
return self._print_info_cmd(rows=rows, **kwargs)
else:
pool_name = kwargs['obj'].strip('\'\"')
if pool_name == kwargs['obj']:
raise exception.InvalidInput(
reason='obj missing quotes %s' % kwargs['obj'])
elif pool_name in self._flags['instorage_mcs_volpool_name']:
for each_row in rows:
if pool_name in each_row:
row = each_row
break
elif pool_name == 'openstack2':
row = rows[-2]
elif pool_name == 'openstack3':
row = rows[-1]
else:
return self._errors['CMMVC5754E']
objrows = []
for idx, val in enumerate(rows[0]):
objrows.append([val, row[idx]])
if 'nohdr' in kwargs:
for index in range(len(objrows)):
objrows[index] = ' '.join(objrows[index][1:])
if 'delim' in kwargs:
for index in range(len(objrows)):
objrows[index] = kwargs['delim'].join(objrows[index])
return ('%s' % '\n'.join(objrows), '')
# Print mostly made-up stuff in the correct syntax
def _cmd_lsnodecanister(self, **kwargs):
rows = [None] * 3
rows[0] = ['id', 'name', 'UPS_serial_number', 'WWNN', 'status',
'IO_group_id', 'IO_group_name', 'config_node',
'UPS_unique_id', 'hardware', 'iscsi_name', 'iscsi_alias',
'panel_name', 'enclosure_id', 'canister_id',
'enclosure_serial_number']
rows[1] = [
'1',
'node1',
'',
'123456789ABCDEF0',
'online',
'0',
'io_grp0',
'yes',
'123456789ABCDEF0',
'100',
'iqn.1982-01.com.inspur:1234.sim.node1',
'',
'01-1',
'1',
'1',
'0123ABC']
rows[2] = [
'2',
'node2',
'',
'123456789ABCDEF1',
'online',
'0',
'io_grp0',
'no',
'123456789ABCDEF1',
'100',
'iqn.1982-01.com.inspur:1234.sim.node2',
'',
'01-2',
'1',
'2',
'0123ABC']
if self._next_cmd_error['lsnodecanister'] == 'header_mismatch':
rows[0].pop(2)
self._next_cmd_error['lsnodecanister'] = ''
if self._next_cmd_error['lsnodecanister'] == 'remove_field':
for row in rows:
row.pop(0)
self._next_cmd_error['lsnodecanister'] = ''
return self._print_info_cmd(rows=rows, **kwargs)
# Print information of every single node of MCS
def _cmd_lsnode(self, **kwargs):
node_infos = dict()
node_infos['1'] = r'''id!1
name!node1
port_id!500507680210C744
port_status!active
port_speed!8Gb
port_id!500507680220C744
port_status!active
port_speed!8Gb
'''
node_infos['2'] = r'''id!2
name!node2
port_id!500507680220C745
port_status!active
port_speed!8Gb
port_id!500507680230C745
port_status!inactive
port_speed!N/A
'''
node_id = kwargs.get('node_id', None)
stdout = node_infos.get(node_id, '')
return stdout, ''
# Print made up stuff for the ports
def _cmd_lsportfc(self, **kwargs):
node_1 = [None] * 7
node_1[0] = ['id', 'fc_io_port_id', 'port_id', 'type',
'port_speed', 'node_id', 'node_name', 'WWPN',
'nportid', 'status', 'attachment']
node_1[1] = ['0', '1', '1', 'fc', '8Gb', '1', 'node1',
'5005076802132ADE', '012E00', 'active', 'switch']
node_1[2] = ['1', '2', '2', 'fc', '8Gb', '1', 'node1',
'5005076802232ADE', '012E00', 'active', 'switch']
node_1[3] = ['2', '3', '3', 'fc', '8Gb', '1', 'node1',
'5005076802332ADE', '9B0600', 'active', 'switch']
node_1[4] = ['3', '4', '4', 'fc', '8Gb', '1', 'node1',
'5005076802432ADE', '012A00', 'active', 'switch']
node_1[5] = ['4', '5', '5', 'fc', '8Gb', '1', 'node1',
'5005076802532ADE', '014A00', 'active', 'switch']
node_1[6] = ['5', '6', '4', 'ethernet', 'N/A', '1', 'node1',
'5005076802632ADE', '000000',
'inactive_unconfigured', 'none']
node_2 = [None] * 7
node_2[0] = ['id', 'fc_io_port_id', 'port_id', 'type',
'port_speed', 'node_id', 'node_name', 'WWPN',
'nportid', 'status', 'attachment']
node_2[1] = ['6', '7', '7', 'fc', '8Gb', '2', 'node2',
'5005086802132ADE', '012E00', 'active', 'switch']
node_2[2] = ['7', '8', '8', 'fc', '8Gb', '2', 'node2',
'5005086802232ADE', '012E00', 'active', 'switch']
node_2[3] = ['8', '9', '9', 'fc', '8Gb', '2', 'node2',
'5005086802332ADE', '9B0600', 'active', 'switch']
node_2[4] = ['9', '10', '10', 'fc', '8Gb', '2', 'node2',
'5005086802432ADE', '012A00', 'active', 'switch']
node_2[5] = ['10', '11', '11', 'fc', '8Gb', '2', 'node2',
'5005086802532ADE', '014A00', 'active', 'switch']
node_2[6] = ['11', '12', '12', 'ethernet', 'N/A', '2', 'node2',
'5005086802632ADE', '000000',
'inactive_unconfigured', 'none']
node_infos = [node_1, node_2]
node_id = int(kwargs['filtervalue'].split('=')[1]) - 1
return self._print_info_cmd(rows=node_infos[node_id], **kwargs)
# Print mostly made-up stuff in the correct syntax
def _cmd_lsportip(self, **kwargs):
if self._next_cmd_error['lsportip'] == 'ip_no_config':
self._next_cmd_error['lsportip'] = ''
ip_addr1 = ''
ip_addr2 = ''
gw = ''
else:
ip_addr1 = '1.234.56.78'
ip_addr2 = '1.234.56.79'
ip_addr3 = '1.234.56.80'
ip_addr4 = '1.234.56.81'
gw = '1.234.56.1'
rows = [None] * 17
rows[0] = ['id', 'node_id', 'node_name', 'IP_address', 'mask',
'gateway', 'IP_address_6', 'prefix_6', 'gateway_6', 'MAC',
'duplex', 'state', 'speed', 'failover', 'link_state']
rows[1] = ['1', '1', 'node1', ip_addr1, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:00', 'Full',
'online', '1Gb/s', 'no', 'active']
rows[2] = ['1', '1', 'node1', '', '', '', '', '', '',
'01:23:45:67:89:00', 'Full', 'online', '1Gb/s', 'yes', '']
rows[3] = ['2', '1', 'node1', ip_addr3, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:01', 'Full',
'configured', '1Gb/s', 'no', 'active']
rows[4] = ['2', '1', 'node1', '', '', '', '', '', '',
'01:23:45:67:89:01', 'Full', 'unconfigured', '1Gb/s',
'yes', 'inactive']
rows[5] = ['3', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no', '']
rows[6] = ['3', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes', '']
rows[7] = ['4', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no', '']
rows[8] = ['4', '1', 'node1', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes', '']
rows[9] = ['1', '2', 'node2', ip_addr2, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:02', 'Full',
'online', '1Gb/s', 'no', '']
rows[10] = ['1', '2', 'node2', '', '', '', '', '', '',
'01:23:45:67:89:02', 'Full', 'online', '1Gb/s', 'yes', '']
rows[11] = ['2', '2', 'node2', ip_addr4, '255.255.255.0',
gw, '', '', '', '01:23:45:67:89:03', 'Full',
'configured', '1Gb/s', 'no', 'inactive']
rows[12] = ['2', '2', 'node2', '', '', '', '', '', '',
'01:23:45:67:89:03', 'Full', 'unconfigured', '1Gb/s',
'yes', '']
rows[13] = ['3', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no', '']
rows[14] = ['3', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes', '']
rows[15] = ['4', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'no', '']
rows[16] = ['4', '2', 'node2', '', '', '', '', '', '', '', '',
'unconfigured', '', 'yes', '']
if self._next_cmd_error['lsportip'] == 'header_mismatch':
rows[0].pop(2)
self._next_cmd_error['lsportip'] = ''
if self._next_cmd_error['lsportip'] == 'remove_field':
for row in rows:
row.pop(1)
self._next_cmd_error['lsportip'] = ''
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lsfabric(self, **kwargs):
if self._next_cmd_error['lsfabric'] == 'no_hosts':
return ('', '')
host_name = kwargs['host'].strip('\'\"') if 'host' in kwargs else None
target_wwpn = kwargs['wwpn'] if 'wwpn' in kwargs else None
host_infos = []
for hv in self._hosts_list.values():
if (not host_name) or (hv['host_name'] == host_name):
if not target_wwpn or target_wwpn in hv['wwpns']:
host_infos.append(hv)
break
if not len(host_infos):
return ('', '')
rows = []
rows.append(['remote_wwpn', 'remote_nportid', 'id', 'node_name',
'local_wwpn', 'local_port', 'local_nportid', 'state',
'name', 'cluster_name', 'type'])
for host_info in host_infos:
for wwpn in host_info['wwpns']:
rows.append([wwpn, '123456', host_info['id'], 'nodeN',
'AABBCCDDEEFF0011', '1', '0123ABC', 'active',
host_info['host_name'], '', 'host'])
if self._next_cmd_error['lsfabric'] == 'header_mismatch':
rows[0].pop(0)
self._next_cmd_error['lsfabric'] = ''
if self._next_cmd_error['lsfabric'] == 'remove_field':
for row in rows:
row.pop(0)
self._next_cmd_error['lsfabric'] = ''
if self._next_cmd_error['lsfabric'] == 'remove_rows':
rows = []
return self._print_info_cmd(rows=rows, **kwargs)
def _get_lcmap_info(self, vol_name):
ret_vals = {
'fc_id': '',
'fc_name': '',
'lc_map_count': '0',
}
for lcmap in self._lcmappings_list.values():
if ((lcmap['source'] == vol_name) or
(lcmap['target'] == vol_name)):
ret_vals['fc_id'] = lcmap['id']
ret_vals['fc_name'] = lcmap['name']
ret_vals['lc_map_count'] = '1'
return ret_vals
# List information about vdisks
def _cmd_lsvdisk(self, **kwargs):
rows = []
rows.append(['id', 'name', 'IO_group_id', 'IO_group_name',
'status', 'mdisk_grp_id', 'mdisk_grp_name',
'capacity', 'type', 'FC_id', 'FC_name', 'RC_id',
'RC_name', 'vdisk_UID', 'lc_map_count', 'copy_count',
'fast_write_state', 'se_copy_count', 'RC_change'])
for vol in self._volumes_list.values():
if (('filtervalue' not in kwargs) or
(kwargs['filtervalue'] == 'name=' + vol['name']) or
(kwargs['filtervalue'] == 'vdisk_UID=' + vol['uid'])):
lcmap_info = self._get_lcmap_info(vol['name'])
if 'bytes' in kwargs:
cap = self._convert_bytes_units(vol['capacity'])
else:
cap = vol['capacity']
rows.append([six.text_type(vol['id']), vol['name'],
vol['IO_group_id'],
vol['IO_group_name'], 'online', '0',
get_test_pool(),
cap, 'striped',
lcmap_info['fc_id'], lcmap_info['fc_name'],
'', '', vol['uid'],
lcmap_info['lc_map_count'], '1', 'empty',
'1', 'no'])
if 'obj' not in kwargs:
return self._print_info_cmd(rows=rows, **kwargs)
else:
if kwargs['obj'] not in self._volumes_list:
return self._errors['CMMVC5754E']
vol = self._volumes_list[kwargs['obj']]
lcmap_info = self._get_lcmap_info(vol['name'])
cap = vol['capacity']
cap_u = vol['used_capacity']
cap_r = vol['real_capacity']
cap_f = vol['free_capacity']
if 'bytes' not in kwargs:
cap = self._convert_bytes_units(cap)
cap_u = self._convert_bytes_units(cap_u)
cap_r = self._convert_bytes_units(cap_r)
cap_f = self._convert_bytes_units(cap_f)
rows = []
rows.append(['id', six.text_type(vol['id'])])
rows.append(['name', vol['name']])
rows.append(['IO_group_id', vol['IO_group_id']])
rows.append(['IO_group_name', vol['IO_group_name']])
rows.append(['status', 'online'])
rows.append(['capacity', cap])
rows.append(['formatted', vol['formatted']])
rows.append(['mdisk_id', ''])
rows.append(['mdisk_name', ''])
rows.append(['FC_id', lcmap_info['fc_id']])
rows.append(['FC_name', lcmap_info['fc_name']])
rows.append(['RC_id', vol['RC_id']])
rows.append(['RC_name', vol['RC_name']])
rows.append(['vdisk_UID', vol['uid']])
rows.append(['throttling', '0'])
if self._next_cmd_error['lsvdisk'] == 'blank_pref_node':
rows.append(['preferred_node_id', ''])
self._next_cmd_error['lsvdisk'] = ''
elif self._next_cmd_error['lsvdisk'] == 'no_pref_node':
self._next_cmd_error['lsvdisk'] = ''
else:
rows.append(['preferred_node_id', '1'])
rows.append(['fast_write_state', 'empty'])
rows.append(['cache', 'readwrite'])
rows.append(['udid', ''])
rows.append(['lc_map_count', lcmap_info['lc_map_count']])
rows.append(['sync_rate', '50'])
rows.append(['copy_count', '1'])
rows.append(['se_copy_count', '0'])
rows.append(['mirror_write_priority', 'latency'])
rows.append(['RC_change', 'no'])
for copy in vol['copies'].values():
rows.append(['copy_id', copy['id']])
rows.append(['status', copy['status']])
rows.append(['primary', copy['primary']])
rows.append(['mdisk_grp_id', copy['mdisk_grp_id']])
rows.append(['mdisk_grp_name', copy['mdisk_grp_name']])
rows.append(['type', 'striped'])
rows.append(['used_capacity', cap_u])
rows.append(['real_capacity', cap_r])
rows.append(['free_capacity', cap_f])
rows.append(['in_tier', copy['in_tier']])
rows.append(['compressed_copy', copy['compressed_copy']])
rows.append(['autoexpand', vol['autoexpand']])
rows.append(['warning', vol['warning']])
rows.append(['grainsize', vol['grainsize']])
if 'nohdr' in kwargs:
for index in range(len(rows)):
rows[index] = ' '.join(rows[index][1:])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
def _cmd_lsiogrp(self, **kwargs):
rows = [None] * 6
rows[0] = ['id', 'name', 'node_count', 'vdisk_count', 'host_count']
rows[1] = ['0', 'io_grp0', '2', '0', '4']
rows[2] = ['1', 'io_grp1', '2', '0', '4']
rows[3] = ['2', 'io_grp2', '0', '0', '4']
rows[4] = ['3', 'io_grp3', '0', '0', '4']
rows[5] = ['4', 'recovery_io_grp', '0', '0', '0']
return self._print_info_cmd(rows=rows, **kwargs)
# List information about hosts
def _cmd_lshost(self, **kwargs):
if 'obj' not in kwargs:
rows = []
rows.append(['id', 'name', 'port_count', 'iogrp_count', 'status'])
found = False
# Sort hosts by names to give predictable order for tests
# depend on it.
for host_name in sorted(self._hosts_list.keys()):
host = self._hosts_list[host_name]
filterstr = 'name=' + host['host_name']
if (('filtervalue' not in kwargs) or
(kwargs['filtervalue'] == filterstr)):
rows.append([host['id'], host['host_name'], '1', '4',
'offline'])
found = True
if found:
return self._print_info_cmd(rows=rows, **kwargs)
else:
return ('', '')
else:
if self._next_cmd_error['lshost'] == 'missing_host':
self._next_cmd_error['lshost'] = ''
return self._errors['CMMVC5754E']
elif self._next_cmd_error['lshost'] == 'bigger_troubles':
return self._errors['CMMVC6527E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5754E']
if (self._next_cmd_error['lshost'] == 'fail_fastpath' and
host_name == 'DifferentHost'):
return self._errors['CMMVC5701E']
host = self._hosts_list[host_name]
rows = []
rows.append(['id', host['id']])
rows.append(['name', host['host_name']])
rows.append(['port_count', '1'])
rows.append(['type', 'generic'])
rows.append(['mask', '1111'])
rows.append(['iogrp_count', '4'])
rows.append(['status', 'online'])
for port in host['iscsi_names']:
rows.append(['iscsi_name', port])
rows.append(['node_logged_in_count', '0'])
rows.append(['state', 'offline'])
for port in host['wwpns']:
rows.append(['WWPN', port])
rows.append(['node_logged_in_count', '0'])
rows.append(['state', 'active'])
if 'nohdr' in kwargs:
for index in range(len(rows)):
rows[index] = ' '.join(rows[index][1:])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
# List iSCSI authorization information about hosts
def _cmd_lsiscsiauth(self, **kwargs):
if self._next_cmd_error['lsiscsiauth'] == 'no_info':
self._next_cmd_error['lsiscsiauth'] = ''
return ('', '')
rows = []
rows.append(['type', 'id', 'name', 'iscsi_auth_method',
'iscsi_chap_secret'])
for host in self._hosts_list.values():
method = 'none'
secret = ''
if 'chapsecret' in host:
method = 'chap'
secret = host['chapsecret']
rows.append(['host', host['id'], host['host_name'], method,
secret])
return self._print_info_cmd(rows=rows, **kwargs)
# List information about host->vdisk mappings
def _cmd_lshostvdiskmap(self, **kwargs):
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5754E']
rows = []
rows.append(['id', 'name', 'SCSI_id', 'vdisk_id', 'vdisk_name',
'vdisk_UID'])
for mapping in self._mappings_list.values():
if (host_name == '') or (mapping['host'] == host_name):
volume = self._volumes_list[mapping['vol']]
rows.append([mapping['id'], mapping['host'],
mapping['lun'], volume['id'],
volume['name'], volume['uid']])
return self._print_info_cmd(rows=rows, **kwargs)
# List information about vdisk->host mappings
def _cmd_lsvdiskhostmap(self, **kwargs):
mappings_found = 0
vdisk_name = kwargs['obj'].strip('\'\"')
if vdisk_name not in self._volumes_list:
return self._errors['CMMVC5753E']
rows = []
rows.append(['id name', 'SCSI_id', 'host_id', 'host_name', 'vdisk_UID',
'IO_group_id', 'IO_group_name'])
for mapping in self._mappings_list.values():
if (mapping['vol'] == vdisk_name):
mappings_found += 1
volume = self._volumes_list[mapping['vol']]
host = self._hosts_list[mapping['host']]
rows.append([volume['id'], mapping['lun'], host['id'],
host['host_name'], volume['uid'],
volume['IO_group_id'], volume['IO_group_name']])
if mappings_found:
return self._print_info_cmd(rows=rows, **kwargs)
else:
return ('', '')
def _cmd_lsvdisklcmappings(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
vdisk = kwargs['obj']
rows = []
rows.append(['id', 'name'])
for v in self._lcmappings_list.values():
if v['source'] == vdisk or v['target'] == vdisk:
rows.append([v['id'], v['name']])
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lslcmap(self, **kwargs):
rows = []
rows.append(['id', 'name', 'source_vdisk_id', 'source_vdisk_name',
'target_vdisk_id', 'target_vdisk_name', 'group_id',
'group_name', 'status', 'progress', 'copy_rate',
'clean_progress', 'incremental', 'partner_FC_id',
'partner_FC_name', 'restoring', 'start_time',
'rc_controlled'])
# Assume we always get a filtervalue argument
filter_key = kwargs['filtervalue'].split('=')[0]
filter_value = kwargs['filtervalue'].split('=')[1]
to_delete = []
for k, v in self._lcmappings_list.items():
if six.text_type(v[filter_key]) == filter_value:
source = self._volumes_list[v['source']]
target = self._volumes_list[v['target']]
self._state_transition('wait', v)
if self._next_cmd_error['lslcmap'] == 'speed_up':
self._next_cmd_error['lslcmap'] = ''
curr_state = v['status']
while self._state_transition('wait', v) == ("", ""):
if curr_state == v['status']:
break
curr_state = v['status']
if ((v['status'] == 'idle_or_copied' and v['autodelete'] and
v['progress'] == '100') or (v['status'] == 'end')):
to_delete.append(k)
else:
rows.append([v['id'], v['name'], source['id'],
source['name'], target['id'], target['name'],
'', '', v['status'], v['progress'],
v['copyrate'], '100', 'off', '', '', 'no', '',
'no'])
for d in to_delete:
del self._lcmappings_list[d]
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lslcconsistgrp(self, **kwargs):
rows = []
if 'obj' not in kwargs:
rows.append(['id', 'name', 'status', 'start_time'])
for lcconsistgrp in self._lcconsistgrp_list.values():
rows.append([lcconsistgrp['id'],
lcconsistgrp['name'],
lcconsistgrp['status'],
lcconsistgrp['start_time']])
return self._print_info_cmd(rows=rows, **kwargs)
else:
lcconsistgrp = None
cg_id = 0
for cg_id in self._lcconsistgrp_list.keys():
if self._lcconsistgrp_list[cg_id]['name'] == kwargs['obj']:
lcconsistgrp = self._lcconsistgrp_list[cg_id]
rows = []
rows.append(['id', six.text_type(cg_id)])
rows.append(['name', lcconsistgrp['name']])
rows.append(['status', lcconsistgrp['status']])
rows.append(['autodelete',
six.text_type(lcconsistgrp['autodelete'])])
rows.append(['start_time',
six.text_type(lcconsistgrp['start_time'])])
for lcmap_id in lcconsistgrp['lcmaps'].keys():
rows.append(['FC_mapping_id', six.text_type(lcmap_id)])
rows.append(['FC_mapping_name',
lcconsistgrp['lcmaps'][lcmap_id]])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
self._lc_cg_state_transition('wait', lcconsistgrp)
return ('%s' % '\n'.join(rows), '')
def _cmd_lsvdiskcopy(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5804E']
name = kwargs['obj']
vol = self._volumes_list[name]
rows = []
rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'status', 'sync',
'primary', 'mdisk_grp_id', 'mdisk_grp_name', 'capacity',
'type', 'se_copy', 'in_tier', 'in_tier_status',
'compressed_copy'])
for copy in vol['copies'].values():
rows.append([vol['id'], vol['name'], copy['id'],
copy['status'], copy['sync'], copy['primary'],
copy['mdisk_grp_id'], copy['mdisk_grp_name'],
vol['capacity'], 'striped', 'yes', copy['in_tier'],
'inactive', copy['compressed_copy']])
if 'copy' not in kwargs:
return self._print_info_cmd(rows=rows, **kwargs)
else:
copy_id = kwargs['copy'].strip('\'\"')
if copy_id not in vol['copies']:
return self._errors['CMMVC6353E']
copy = vol['copies'][copy_id]
rows = []
rows.append(['vdisk_id', vol['id']])
rows.append(['vdisk_name', vol['name']])
rows.append(['capacity', vol['capacity']])
rows.append(['copy_id', copy['id']])
rows.append(['status', copy['status']])
rows.append(['sync', copy['sync']])
copy['sync'] = 'yes'
rows.append(['primary', copy['primary']])
rows.append(['mdisk_grp_id', copy['mdisk_grp_id']])
rows.append(['mdisk_grp_name', copy['mdisk_grp_name']])
rows.append(['in_tier', copy['in_tier']])
rows.append(['in_tier_status', 'inactive'])
rows.append(['compressed_copy', copy['compressed_copy']])
rows.append(['autoexpand', vol['autoexpand']])
if 'delim' in kwargs:
for index in range(len(rows)):
rows[index] = kwargs['delim'].join(rows[index])
return ('%s' % '\n'.join(rows), '')
# List vdisk sync progress
def _cmd_lsvdisksyncprogress(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5804E']
name = kwargs['obj']
copy_id = kwargs.get('copy', None)
vol = self._volumes_list[name]
rows = []
rows.append(['vdisk_id', 'vdisk_name', 'copy_id', 'progress',
'estimated_completion_time'])
copy_found = False
for copy in vol['copies'].values():
if not copy_id or copy_id == copy['id']:
copy_found = True
row = [vol['id'], name, copy['id']]
if copy['sync'] == 'yes':
row.extend(['100', ''])
else:
row.extend(['50', '140210115226'])
copy['sync'] = 'yes'
rows.append(row)
if not copy_found:
return self._errors['CMMVC5804E']
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lsrcrelationship(self, **kwargs):
rows = []
rows.append(['id', 'name', 'master_cluster_id', 'master_cluster_name',
'master_vdisk_id', 'master_vdisk_name', 'aux_cluster_id',
'aux_cluster_name', 'aux_vdisk_id', 'aux_vdisk_name',
'consistency_group_id', 'primary',
'consistency_group_name', 'state', 'bg_copy_priority',
'progress', 'freeze_time', 'status', 'sync',
'copy_type', 'cycling_mode', 'cycle_period_seconds',
'master_change_vdisk_id', 'master_change_vdisk_name',
'aux_change_vdisk_id', 'aux_change_vdisk_name'])
# Assume we always get a filtervalue argument
filter_key = kwargs['filtervalue'].split('=')[0]
filter_value = kwargs['filtervalue'].split('=')[1]
for k, v in self._rcrelationship_list.items():
if six.text_type(v[filter_key]) == filter_value:
self._rc_state_transition('wait', v)
if self._next_cmd_error['lsrcrelationship'] == 'speed_up':
self._next_cmd_error['lsrcrelationship'] = ''
curr_state = v['status']
while self._rc_state_transition('wait', v) == ("", ""):
if curr_state == v['status']:
break
curr_state = v['status']
rows.append([v['id'], v['name'], v['master_cluster_id'],
v['master_cluster_name'], v['master_vdisk_id'],
v['master_vdisk_name'], v['aux_cluster_id'],
v['aux_cluster_name'], v['aux_vdisk_id'],
v['aux_vdisk_name'], v['consistency_group_id'],
v['primary'], v['consistency_group_name'],
v['state'], v['bg_copy_priority'], v['progress'],
v['freeze_time'], v['status'], v['sync'],
v['copy_type'], v['cycling_mode'],
v['cycle_period_seconds'],
v['master_change_vdisk_id'],
v['master_change_vdisk_name'],
v['aux_change_vdisk_id'],
v['aux_change_vdisk_name']])
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lspartnershipcandidate(self, **kwargs):
rows = [None] * 4
master_sys = self._system_list['instorage-mcs-sim']
aux_sys = self._system_list['aux-mcs-sim']
rows[0] = ['id', 'configured', 'name']
rows[1] = [master_sys['id'], 'no', master_sys['name']]
rows[2] = [aux_sys['id'], 'no', aux_sys['name']]
rows[3] = ['0123456789001234', 'no', 'fake_mcs']
return self._print_info_cmd(rows=rows, **kwargs)
def _cmd_lspartnership(self, **kwargs):
rows = []
rows.append(['id', 'name', 'location', 'partnership',
'type', 'cluster_ip', 'event_log_sequence'])
master_sys = self._system_list['instorage-mcs-sim']
if master_sys['name'] not in self._partnership_list:
local_info = {}
local_info['id'] = master_sys['id']
local_info['name'] = master_sys['name']
local_info['location'] = 'local'
local_info['type'] = ''
local_info['cluster_ip'] = ''
local_info['event_log_sequence'] = ''
local_info['chap_secret'] = ''
local_info['linkbandwidthmbits'] = ''
local_info['backgroundcopyrate'] = ''
local_info['partnership'] = ''
self._partnership_list[master_sys['id']] = local_info
# Assume we always get a filtervalue argument
filter_key = kwargs['filtervalue'].split('=')[0]
filter_value = kwargs['filtervalue'].split('=')[1]
for k, v in self._partnership_list.items():
if six.text_type(v[filter_key]) == filter_value:
rows.append([v['id'], v['name'], v['location'],
v['partnership'], v['type'], v['cluster_ip'],
v['event_log_sequence']])
return self._print_info_cmd(rows=rows, **kwargs)
def _get_mdiskgrp_id(self, mdiskgrp):
grp_num = len(self._flags['instorage_mcs_volpool_name'])
if mdiskgrp in self._flags['instorage_mcs_volpool_name']:
for i in range(grp_num):
if mdiskgrp == self._flags['instorage_mcs_volpool_name'][i]:
return i + 1
elif mdiskgrp == 'openstack2':
return grp_num + 1
elif mdiskgrp == 'openstack3':
return grp_num + 2
else:
return None
# Create a vdisk
def _cmd_mkvdisk(self, **kwargs):
# We only save the id/uid, name, and size - all else will be made up
volume_info = {}
volume_info['id'] = self._find_unused_id(self._volumes_list)
volume_info['uid'] = ('ABCDEF' * 3) + ('0' * 14) + volume_info['id']
mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
if mdiskgrp == kwargs['mdiskgrp']:
raise exception.InvalidInput(
reason='mdiskgrp missing quotes %s' % kwargs['mdiskgrp'])
mdiskgrp_id = self._get_mdiskgrp_id(mdiskgrp)
volume_info['mdisk_grp_name'] = mdiskgrp
volume_info['mdisk_grp_id'] = str(mdiskgrp_id)
if 'name' in kwargs:
volume_info['name'] = kwargs['name'].strip('\'\"')
else:
volume_info['name'] = 'vdisk' + volume_info['id']
# Assume size and unit are given, store it in bytes
capacity = int(kwargs['size'])
unit = kwargs['unit']
volume_info['capacity'] = self._convert_units_bytes(capacity, unit)
volume_info['IO_group_id'] = kwargs['iogrp']
volume_info['IO_group_name'] = 'io_grp%s' % kwargs['iogrp']
volume_info['RC_name'] = ''
volume_info['RC_id'] = ''
        if 'intier' in kwargs and kwargs['intier'] == 'on':
            volume_info['in_tier'] = 'on'
        else:
            volume_info['in_tier'] = 'off'
if 'rsize' in kwargs:
volume_info['formatted'] = 'no'
# Fake numbers
volume_info['used_capacity'] = '786432'
volume_info['real_capacity'] = '21474816'
volume_info['free_capacity'] = '38219264'
if 'warning' in kwargs:
volume_info['warning'] = kwargs['warning'].rstrip('%')
else:
volume_info['warning'] = '80'
if 'autoexpand' in kwargs:
volume_info['autoexpand'] = 'on'
else:
volume_info['autoexpand'] = 'off'
if 'grainsize' in kwargs:
volume_info['grainsize'] = kwargs['grainsize']
else:
volume_info['grainsize'] = '32'
if 'compressed' in kwargs:
volume_info['compressed_copy'] = 'yes'
else:
volume_info['compressed_copy'] = 'no'
else:
volume_info['used_capacity'] = volume_info['capacity']
volume_info['real_capacity'] = volume_info['capacity']
volume_info['free_capacity'] = '0'
volume_info['warning'] = ''
volume_info['autoexpand'] = ''
volume_info['grainsize'] = ''
volume_info['compressed_copy'] = 'no'
volume_info['formatted'] = 'yes'
if 'nofmtdisk' in kwargs:
if kwargs['nofmtdisk']:
volume_info['formatted'] = 'no'
vol_cp = {'id': '0',
'status': 'online',
'sync': 'yes',
'primary': 'yes',
'mdisk_grp_id': str(mdiskgrp_id),
'mdisk_grp_name': mdiskgrp,
'in_tier': volume_info['in_tier'],
'compressed_copy': volume_info['compressed_copy']}
volume_info['copies'] = {'0': vol_cp}
if volume_info['name'] in self._volumes_list:
return self._errors['CMMVC6035E']
else:
self._volumes_list[volume_info['name']] = volume_info
return ('Virtual Disk, id [%s], successfully created' %
(volume_info['id']), '')
# Delete a vdisk
def _cmd_rmvdisk(self, **kwargs):
force = True if 'force' in kwargs else False
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
if not force:
for mapping in self._mappings_list.values():
if mapping['vol'] == vol_name:
return self._errors['CMMVC5840E']
for lcmap in self._lcmappings_list.values():
if ((lcmap['source'] == vol_name) or
(lcmap['target'] == vol_name)):
return self._errors['CMMVC5840E']
del self._volumes_list[vol_name]
return ('', '')
def _cmd_expandvdisksize(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
# Assume unit is gb
if 'size' not in kwargs:
return self._errors['CMMVC5707E']
size = int(kwargs['size'])
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
curr_size = int(self._volumes_list[vol_name]['capacity'])
addition = size * units.Gi
self._volumes_list[vol_name]['capacity'] = (
six.text_type(curr_size + addition))
return ('', '')
def _add_port_to_host(self, host_info, **kwargs):
if 'iscsiname' in kwargs:
added_key = 'iscsi_names'
added_val = kwargs['iscsiname'].strip('\'\"')
elif 'hbawwpn' in kwargs:
added_key = 'wwpns'
added_val = kwargs['hbawwpn'].strip('\'\"')
else:
return self._errors['CMMVC5707E']
host_info[added_key].append(added_val)
for v in self._hosts_list.values():
if v['id'] == host_info['id']:
continue
for port in v[added_key]:
if port == added_val:
return self._errors['CMMVC6581E']
return ('', '')
# Make a host
def _cmd_mkhost(self, **kwargs):
host_info = {}
host_info['id'] = self._find_unused_id(self._hosts_list)
if 'name' in kwargs:
host_name = kwargs['name'].strip('\'\"')
else:
host_name = 'host' + six.text_type(host_info['id'])
if self._is_invalid_name(host_name):
return self._errors['CMMVC6527E']
if host_name in self._hosts_list:
return self._errors['CMMVC6035E']
host_info['host_name'] = host_name
host_info['iscsi_names'] = []
host_info['wwpns'] = []
out, err = self._add_port_to_host(host_info, **kwargs)
if not len(err):
self._hosts_list[host_name] = host_info
return ('Host, id [%s], successfully created' %
(host_info['id']), '')
else:
return (out, err)
# Add ports to an existing host
def _cmd_addhostport(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
host_info = self._hosts_list[host_name]
return self._add_port_to_host(host_info, **kwargs)
# Change host properties
def _cmd_chhost(self, **kwargs):
if 'chapsecret' not in kwargs:
return self._errors['CMMVC5707E']
        secret = kwargs['chapsecret'].strip('\'\"')
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
self._hosts_list[host_name]['chapsecret'] = secret
return ('', '')
# Remove a host
def _cmd_rmhost(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
host_name = kwargs['obj'].strip('\'\"')
if host_name not in self._hosts_list:
return self._errors['CMMVC5753E']
for v in self._mappings_list.values():
if (v['host'] == host_name):
return self._errors['CMMVC5871E']
del self._hosts_list[host_name]
return ('', '')
# Create a vdisk-host mapping
def _cmd_mkvdiskhostmap(self, **kwargs):
mapping_info = {}
mapping_info['id'] = self._find_unused_id(self._mappings_list)
if 'host' not in kwargs:
return self._errors['CMMVC5707E']
mapping_info['host'] = kwargs['host'].strip('\'\"')
if 'scsi' in kwargs:
mapping_info['lun'] = kwargs['scsi'].strip('\'\"')
else:
mapping_info['lun'] = mapping_info['id']
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
mapping_info['vol'] = kwargs['obj'].strip('\'\"')
if mapping_info['vol'] not in self._volumes_list:
return self._errors['CMMVC5753E']
if mapping_info['host'] not in self._hosts_list:
return self._errors['CMMVC5754E']
if mapping_info['vol'] in self._mappings_list:
return self._errors['CMMVC6071E']
for v in self._mappings_list.values():
if ((v['host'] == mapping_info['host']) and
(v['lun'] == mapping_info['lun'])):
return self._errors['CMMVC5879E']
for v in self._mappings_list.values():
if (v['vol'] == mapping_info['vol']) and ('force' not in kwargs):
return self._errors['CMMVC6071E']
self._mappings_list[mapping_info['id']] = mapping_info
return ('Virtual Disk to Host map, id [%s], successfully created'
% (mapping_info['id']), '')
# Delete a vdisk-host mapping
def _cmd_rmvdiskhostmap(self, **kwargs):
if 'host' not in kwargs:
return self._errors['CMMVC5707E']
host = kwargs['host'].strip('\'\"')
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol = kwargs['obj'].strip('\'\"')
mapping_ids = []
for v in self._mappings_list.values():
if v['vol'] == vol:
mapping_ids.append(v['id'])
if not mapping_ids:
return self._errors['CMMVC5753E']
this_mapping = None
for mapping_id in mapping_ids:
if self._mappings_list[mapping_id]['host'] == host:
this_mapping = mapping_id
if this_mapping is None:
return self._errors['CMMVC5753E']
del self._mappings_list[this_mapping]
return ('', '')
# Create a LocalCopy mapping
def _cmd_mklcmap(self, **kwargs):
source = ''
target = ''
copyrate = kwargs['copyrate'] if 'copyrate' in kwargs else '50'
if 'source' not in kwargs:
return self._errors['CMMVC5707E']
source = kwargs['source'].strip('\'\"')
if source not in self._volumes_list:
return self._errors['CMMVC5754E']
if 'target' not in kwargs:
return self._errors['CMMVC5707E']
target = kwargs['target'].strip('\'\"')
if target not in self._volumes_list:
return self._errors['CMMVC5754E']
if source == target:
return self._errors['CMMVC6303E']
if (self._volumes_list[source]['capacity'] !=
self._volumes_list[target]['capacity']):
return self._errors['CMMVC5754E']
lcmap_info = {}
lcmap_info['source'] = source
lcmap_info['target'] = target
lcmap_info['id'] = self._find_unused_id(self._lcmappings_list)
lcmap_info['name'] = 'lcmap' + lcmap_info['id']
lcmap_info['copyrate'] = copyrate
lcmap_info['progress'] = '0'
lcmap_info['autodelete'] = True if 'autodelete' in kwargs else False
lcmap_info['status'] = 'idle_or_copied'
# Add lcmap to consistency group
if 'consistgrp' in kwargs:
consistgrp = kwargs['consistgrp']
# if is digit, assume is cg id, else is cg name
cg_id = 0
if not consistgrp.isdigit():
for consistgrp_key in self._lcconsistgrp_list.keys():
if (self._lcconsistgrp_list[consistgrp_key]['name'] ==
consistgrp):
cg_id = consistgrp_key
lcmap_info['consistgrp'] = consistgrp_key
break
else:
if int(consistgrp) in self._lcconsistgrp_list.keys():
cg_id = int(consistgrp)
# If can't find exist consistgrp id, return not exist error
if not cg_id:
return self._errors['CMMVC5754E']
lcmap_info['consistgrp'] = cg_id
# Add lcmap to consistgrp
self._lcconsistgrp_list[cg_id]['lcmaps'][lcmap_info['id']] = (
lcmap_info['name'])
self._lc_cg_state_transition('add',
self._lcconsistgrp_list[cg_id])
self._lcmappings_list[lcmap_info['id']] = lcmap_info
return('LocalCopy Mapping, id [' + lcmap_info['id'] +
'], successfully created', '')
def _cmd_prestartlcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
if self._next_cmd_error['prestartlcmap'] == 'bad_id':
id_num = -1
self._next_cmd_error['prestartlcmap'] = ''
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._state_transition('prepare', lcmap)
def _cmd_startlcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
if self._next_cmd_error['startlcmap'] == 'bad_id':
id_num = -1
self._next_cmd_error['startlcmap'] = ''
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._state_transition('start', lcmap)
def _cmd_stoplcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._state_transition('stop', lcmap)
def _cmd_rmlcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
force = True if 'force' in kwargs else False
if self._next_cmd_error['rmlcmap'] == 'bad_id':
id_num = -1
self._next_cmd_error['rmlcmap'] = ''
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
function = 'delete_force' if force else 'delete'
ret = self._state_transition(function, lcmap)
if lcmap['status'] == 'end':
del self._lcmappings_list[id_num]
return ret
def _cmd_chlcmap(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
id_num = kwargs['obj']
try:
lcmap = self._lcmappings_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
for key in ['name', 'copyrate', 'autodelete']:
if key in kwargs:
lcmap[key] = kwargs[key]
return ('', '')
# Create a LocalCopy mapping
def _cmd_mklcconsistgrp(self, **kwargs):
lcconsistgrp_info = {}
lcconsistgrp_info['id'] = self._find_unused_id(self._lcconsistgrp_list)
if 'name' in kwargs:
lcconsistgrp_info['name'] = kwargs['name'].strip('\'\"')
else:
lcconsistgrp_info['name'] = 'lccstgrp' + lcconsistgrp_info['id']
if 'autodelete' in kwargs:
lcconsistgrp_info['autodelete'] = True
else:
lcconsistgrp_info['autodelete'] = False
lcconsistgrp_info['status'] = 'empty'
lcconsistgrp_info['start_time'] = None
lcconsistgrp_info['lcmaps'] = {}
self._lcconsistgrp_list[lcconsistgrp_info['id']] = lcconsistgrp_info
return('LocalCopy Consistency Group, id [' + lcconsistgrp_info['id'] +
'], successfully created', '')
def _cmd_prestartlcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
cg_name = kwargs['obj']
cg_id = 0
for cg_id in self._lcconsistgrp_list.keys():
if cg_name == self._lcconsistgrp_list[cg_id]['name']:
break
return self._lc_cg_state_transition('prepare',
self._lcconsistgrp_list[cg_id])
def _cmd_startlcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
cg_name = kwargs['obj']
cg_id = 0
for cg_id in self._lcconsistgrp_list.keys():
if cg_name == self._lcconsistgrp_list[cg_id]['name']:
break
return self._lc_cg_state_transition('start',
self._lcconsistgrp_list[cg_id])
def _cmd_stoplcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
try:
lcconsistgrps = self._lcconsistgrp_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
return self._lc_cg_state_transition('stop', lcconsistgrps)
def _cmd_rmlcconsistgrp(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
cg_name = kwargs['obj']
force = True if 'force' in kwargs else False
cg_id = 0
for cg_id in self._lcconsistgrp_list.keys():
if cg_name == self._lcconsistgrp_list[cg_id]['name']:
break
if not cg_id:
return self._errors['CMMVC5753E']
lcconsistgrps = self._lcconsistgrp_list[cg_id]
function = 'delete_force' if force else 'delete'
ret = self._lc_cg_state_transition(function, lcconsistgrps)
if lcconsistgrps['status'] == 'end':
del self._lcconsistgrp_list[cg_id]
return ret
def _cmd_migratevdisk(self, **kwargs):
if 'mdiskgrp' not in kwargs or 'vdisk' not in kwargs:
return self._errors['CMMVC5707E']
mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
vdisk = kwargs['vdisk'].strip('\'\"')
if vdisk in self._volumes_list:
curr_mdiskgrp = self._volumes_list
else:
for pool in self._other_pools:
if vdisk in pool:
curr_mdiskgrp = pool
break
else:
return self._errors['CMMVC5754E']
        if mdiskgrp in self._flags['instorage_mcs_volpool_name']:
tgt_mdiskgrp = self._volumes_list
elif mdiskgrp == 'openstack2':
tgt_mdiskgrp = self._other_pools['openstack2']
elif mdiskgrp == 'openstack3':
tgt_mdiskgrp = self._other_pools['openstack3']
else:
return self._errors['CMMVC5754E']
if curr_mdiskgrp == tgt_mdiskgrp:
return self._errors['CMMVC6430E']
vol = curr_mdiskgrp[vdisk]
tgt_mdiskgrp[vdisk] = vol
del curr_mdiskgrp[vdisk]
return ('', '')
def _cmd_addvdiskcopy(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
vol = self._volumes_list[vol_name]
if 'mdiskgrp' not in kwargs:
return self._errors['CMMVC5707E']
mdiskgrp = kwargs['mdiskgrp'].strip('\'\"')
if mdiskgrp == kwargs['mdiskgrp']:
            raise exception.InvalidInput(
                reason='mdiskgrp missing quotes %s' % kwargs['mdiskgrp'])
copy_info = {}
copy_info['id'] = self._find_unused_id(vol['copies'])
copy_info['status'] = 'online'
copy_info['sync'] = 'no'
copy_info['primary'] = 'no'
copy_info['mdisk_grp_name'] = mdiskgrp
copy_info['mdisk_grp_id'] = str(self._get_mdiskgrp_id(mdiskgrp))
if 'intier' in kwargs:
if kwargs['intier'] == 'on':
copy_info['in_tier'] = 'on'
else:
copy_info['in_tier'] = 'off'
if 'rsize' in kwargs:
if 'compressed' in kwargs:
copy_info['compressed_copy'] = 'yes'
else:
copy_info['compressed_copy'] = 'no'
vol['copies'][copy_info['id']] = copy_info
return ('Vdisk [%(vid)s] copy [%(cid)s] successfully created' %
{'vid': vol['id'], 'cid': copy_info['id']}, '')
def _cmd_rmvdiskcopy(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
if 'copy' not in kwargs:
return self._errors['CMMVC5707E']
copy_id = kwargs['copy'].strip('\'\"')
if vol_name not in self._volumes_list:
return self._errors['CMMVC5753E']
vol = self._volumes_list[vol_name]
if copy_id not in vol['copies']:
return self._errors['CMMVC6353E']
del vol['copies'][copy_id]
return ('', '')
def _cmd_chvdisk(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
vol = self._volumes_list[vol_name]
kwargs.pop('obj')
params = ['name', 'warning', 'udid',
'autoexpand', 'intier', 'primary']
for key, value in kwargs.items():
if key == 'intier':
vol['in_tier'] = value
continue
if key == 'warning':
vol['warning'] = value.rstrip('%')
continue
if key == 'name':
vol['name'] = value
del self._volumes_list[vol_name]
self._volumes_list[value] = vol
if key == 'primary':
                copies = vol['copies']
if value == '0':
copies['0']['primary'] = 'yes'
copies['1']['primary'] = 'no'
elif value == '1':
copies['0']['primary'] = 'no'
copies['1']['primary'] = 'yes'
else:
err = self._errors['CMMVC6353E'][1] % {'VALUE': key}
return ('', err)
if key in params:
vol[key] = value
else:
err = self._errors['CMMVC5709E'][1] % {'VALUE': key}
return ('', err)
return ('', '')
def _cmd_movevdisk(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
vol_name = kwargs['obj'].strip('\'\"')
vol = self._volumes_list[vol_name]
if 'iogrp' not in kwargs:
return self._errors['CMMVC5707E']
iogrp = kwargs['iogrp']
if iogrp.isdigit():
vol['IO_group_id'] = iogrp
vol['IO_group_name'] = 'io_grp%s' % iogrp
else:
vol['IO_group_id'] = iogrp[6:]
vol['IO_group_name'] = iogrp
return ('', '')
def _cmd_addvdiskaccess(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
return ('', '')
def _cmd_rmvdiskaccess(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
return ('', '')
def _add_host_to_list(self, connector):
host_info = {}
host_info['id'] = self._find_unused_id(self._hosts_list)
host_info['host_name'] = connector['host']
host_info['iscsi_names'] = []
host_info['wwpns'] = []
if 'initiator' in connector:
host_info['iscsi_names'].append(connector['initiator'])
if 'wwpns' in connector:
host_info['wwpns'] = host_info['wwpns'] + connector['wwpns']
self._hosts_list[connector['host']] = host_info
def _host_in_list(self, host_name):
for k in self._hosts_list:
if k.startswith(host_name):
return k
return None
# Replication related command
# Create a remote copy
def _cmd_mkrcrelationship(self, **kwargs):
master_vol = ''
aux_vol = ''
aux_cluster = ''
master_sys = self._system_list['instorage-mcs-sim']
aux_sys = self._system_list['aux-mcs-sim']
if 'master' not in kwargs:
return self._errors['CMMVC5707E']
master_vol = kwargs['master'].strip('\'\"')
if master_vol not in self._volumes_list:
return self._errors['CMMVC5754E']
if 'aux' not in kwargs:
return self._errors['CMMVC5707E']
aux_vol = kwargs['aux'].strip('\'\"')
if aux_vol not in self._volumes_list:
return self._errors['CMMVC5754E']
if 'cluster' not in kwargs:
return self._errors['CMMVC5707E']
aux_cluster = kwargs['cluster'].strip('\'\"')
if aux_cluster != aux_sys['name']:
return self._errors['CMMVC5754E']
if (self._volumes_list[master_vol]['capacity'] !=
self._volumes_list[aux_vol]['capacity']):
return self._errors['CMMVC5754E']
rcrel_info = {}
rcrel_info['id'] = self._find_unused_id(self._rcrelationship_list)
rcrel_info['name'] = 'rcrel' + rcrel_info['id']
rcrel_info['master_cluster_id'] = master_sys['id']
rcrel_info['master_cluster_name'] = master_sys['name']
rcrel_info['master_vdisk_id'] = self._volumes_list[master_vol]['id']
rcrel_info['master_vdisk_name'] = master_vol
rcrel_info['aux_cluster_id'] = aux_sys['id']
rcrel_info['aux_cluster_name'] = aux_sys['name']
rcrel_info['aux_vdisk_id'] = self._volumes_list[aux_vol]['id']
rcrel_info['aux_vdisk_name'] = aux_vol
rcrel_info['primary'] = 'master'
rcrel_info['consistency_group_id'] = ''
rcrel_info['consistency_group_name'] = ''
rcrel_info['state'] = 'inconsistent_stopped'
rcrel_info['bg_copy_priority'] = '50'
rcrel_info['progress'] = '0'
rcrel_info['freeze_time'] = ''
rcrel_info['status'] = 'online'
rcrel_info['sync'] = ''
rcrel_info['copy_type'] = 'async' if 'async' in kwargs else 'sync'
rcrel_info['cycling_mode'] = ''
rcrel_info['cycle_period_seconds'] = '300'
rcrel_info['master_change_vdisk_id'] = ''
rcrel_info['master_change_vdisk_name'] = ''
rcrel_info['aux_change_vdisk_id'] = ''
rcrel_info['aux_change_vdisk_name'] = ''
self._rcrelationship_list[rcrel_info['name']] = rcrel_info
self._volumes_list[master_vol]['RC_name'] = rcrel_info['name']
self._volumes_list[master_vol]['RC_id'] = rcrel_info['id']
self._volumes_list[aux_vol]['RC_name'] = rcrel_info['name']
self._volumes_list[aux_vol]['RC_id'] = rcrel_info['id']
return('RC Relationship, id [' + rcrel_info['id'] +
'], successfully created', '')
def _cmd_startrcrelationship(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
primary_vol = None
if 'primary' in kwargs:
primary_vol = kwargs['primary'].strip('\'\"')
try:
rcrel = self._rcrelationship_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
if rcrel['state'] == 'idling' and not primary_vol:
return self._errors['CMMVC5963E']
self._rc_state_transition('start', rcrel)
if primary_vol:
self._rcrelationship_list[id_num]['primary'] = primary_vol
return ('', '')
def _cmd_stoprcrelationship(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
force_access = True if 'access' in kwargs else False
try:
rcrel = self._rcrelationship_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
function = 'stop_access' if force_access else 'stop'
self._rc_state_transition(function, rcrel)
if force_access:
self._rcrelationship_list[id_num]['primary'] = ''
return ('', '')
def _cmd_switchrcrelationship(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5707E']
id_num = kwargs['obj']
try:
rcrel = self._rcrelationship_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
if rcrel['state'] == instorage_const.REP_CONSIS_SYNC:
rcrel['primary'] = kwargs['primary']
return ('', '')
else:
return self._errors['CMMVC5753E']
def _cmd_rmrcrelationship(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
id_num = kwargs['obj']
force = True if 'force' in kwargs else False
try:
rcrel = self._rcrelationship_list[id_num]
except KeyError:
return self._errors['CMMVC5753E']
function = 'delete_force' if force else 'delete'
self._rc_state_transition(function, rcrel)
if rcrel['state'] == 'end':
self._volumes_list[rcrel['master_vdisk_name']]['RC_name'] = ''
self._volumes_list[rcrel['master_vdisk_name']]['RC_id'] = ''
self._volumes_list[rcrel['aux_vdisk_name']]['RC_name'] = ''
self._volumes_list[rcrel['aux_vdisk_name']]['RC_id'] = ''
del self._rcrelationship_list[id_num]
return ('', '')
def _rc_state_transition(self, function, rcrel):
if (function == 'wait' and
'wait' not in self._rc_transitions[rcrel['state']]):
return ('', '')
if rcrel['state'] == 'inconsistent_copying' and function == 'wait':
if rcrel['progress'] == '0':
rcrel['progress'] = '50'
else:
rcrel['progress'] = '100'
rcrel['state'] = 'consistent_synchronized'
return ('', '')
else:
try:
curr_state = rcrel['state']
rcrel['state'] = self._rc_transitions[curr_state][function]
return ('', '')
except Exception:
return self._errors['CMMVC5982E']
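    # Illustrative sketch (not from the original source): a 'wait'
    # transition on a relationship in state 'inconsistent_copying' first
    # bumps its progress from '0' to '50'; the next 'wait' sets progress
    # to '100' and moves the state to 'consistent_synchronized'.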
def _cmd_mkippartnership(self, **kwargs):
if 'clusterip' not in kwargs:
return self._errors['CMMVC5707E']
        clusterip = kwargs['clusterip'].strip('\'\"')
if 'linkbandwidthmbits' not in kwargs:
return self._errors['CMMVC5707E']
bandwidth = kwargs['linkbandwidthmbits'].strip('\'\"')
if 'backgroundcopyrate' not in kwargs:
return self._errors['CMMVC5707E']
copyrate = kwargs['backgroundcopyrate'].strip('\'\"')
if clusterip == '192.168.10.21':
partner_info_id = self._system_list['instorage-mcs-sim']['id']
partner_info_name = self._system_list['instorage-mcs-sim']['name']
else:
partner_info_id = self._system_list['aux-mcs-sim']['id']
partner_info_name = self._system_list['aux-mcs-sim']['name']
partner_info = {}
partner_info['id'] = partner_info_id
partner_info['name'] = partner_info_name
partner_info['location'] = 'remote'
partner_info['type'] = 'ipv4'
partner_info['cluster_ip'] = clusterip
partner_info['event_log_sequence'] = ''
partner_info['chap_secret'] = ''
partner_info['linkbandwidthmbits'] = bandwidth
partner_info['backgroundcopyrate'] = copyrate
partner_info['partnership'] = 'fully_configured'
self._partnership_list[partner_info['id']] = partner_info
return('', '')
def _cmd_mkfcpartnership(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
peer_sys = kwargs['obj']
if 'linkbandwidthmbits' not in kwargs:
return self._errors['CMMVC5707E']
bandwidth = kwargs['linkbandwidthmbits'].strip('\'\"')
if 'backgroundcopyrate' not in kwargs:
return self._errors['CMMVC5707E']
copyrate = kwargs['backgroundcopyrate'].strip('\'\"')
partner_info = {}
partner_info['id'] = self._system_list[peer_sys]['id']
partner_info['name'] = peer_sys
partner_info['location'] = 'remote'
partner_info['type'] = 'fc'
partner_info['cluster_ip'] = ''
partner_info['event_log_sequence'] = ''
partner_info['chap_secret'] = ''
partner_info['linkbandwidthmbits'] = bandwidth
partner_info['backgroundcopyrate'] = copyrate
partner_info['partnership'] = 'fully_configured'
self._partnership_list[partner_info['id']] = partner_info
return('', '')
def _cmd_chpartnership(self, **kwargs):
if 'obj' not in kwargs:
return self._errors['CMMVC5701E']
peer_sys = kwargs['obj']
if peer_sys not in self._partnership_list:
return self._errors['CMMVC5753E']
partner_state = ('fully_configured' if 'start' in kwargs
else 'fully_configured_stopped')
self._partnership_list[peer_sys]['partnership'] = partner_state
return('', '')
# The main function to run commands on the management simulator
def execute_command(self, cmd, check_exit_code=True):
try:
kwargs = self._cmd_to_dict(cmd)
except IndexError:
return self._errors['CMMVC5707E']
command = kwargs.pop('cmd')
func = getattr(self, '_cmd_' + command)
out, err = func(**kwargs)
if (check_exit_code) and (len(err) != 0):
raise processutils.ProcessExecutionError(exit_code=1,
stdout=out,
stderr=err,
cmd=' '.join(cmd))
return (out, err)
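    # Hedged usage sketch: the exact token syntax accepted here is defined
    # by _cmd_to_dict, which lies outside this excerpt, so the argument
    # list below is an assumption for illustration only.
    #
    #     out, err = simulator.execute_command(
    #         ['lsrcrelationship', '-filtervalue', 'name=rcrel0'])
    #
    # With check_exit_code=True (the default), a non-empty stderr raises
    # processutils.ProcessExecutionError instead of being returned.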
    # After calling this function, the next call to the specified command
    # will result in the error specified
def error_injection(self, cmd, error):
self._next_cmd_error[cmd] = error
def change_vdiskcopy_attr(self, vol_name, key, value, copy="primary"):
if copy == 'primary':
self._volumes_list[vol_name]['copies']['0'][key] = value
elif copy == 'secondary':
self._volumes_list[vol_name]['copies']['1'][key] = value
else:
msg = "The copy should be primary or secondary"
raise exception.InvalidInput(reason=msg)
|
Python
|
CL
|
f1562c1e874966f01f6f9015b8b4f89090fd39cde3a18b814e1ee3bb376b22eb
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Union
import networkx as nx
import numpy as np
from scipy.stats import rankdata
from . import assertions
def diagonal_augmentation(
graph: Union[nx.Graph, nx.DiGraph],
weight_column: str = 'weight'
) -> nx.Graph:
"""
    Replaces the diagonal of the adjacency matrix of the graph with the
    weighted degree divided by (number of vertices - 1). For directed
    graphs, the weighted in and out degree is averaged.
    Modifies the provided graph in place as well as returning it.
    :param graph: The networkx graph which will get a replaced diagonal
:type graph: Union[nx.Graph, nx.DiGraph]
:param str weight_column: The weight column of the edge
:return: The networkx Graph or DiGraph object that was modified in place.
:rtype: Union[nx.Graph, nx.DiGraph]
"""
assertions.assert_is_graph(graph)
vertices = graph.nodes()
vertex_count = len(vertices)
for vertex in vertices:
# remove self loops
if graph.has_edge(vertex, vertex):
graph.remove_edge(vertex, vertex)
if isinstance(graph, nx.DiGraph):
in_degree = graph.in_degree(vertex, weight=weight_column)
out_degree = graph.out_degree(vertex, weight=weight_column)
weighted_degree = (in_degree + out_degree) / 2
else:
weighted_degree = graph.degree(vertex, weight=weight_column)
# add the augmented weight back onto the diagonal
graph.add_edge(vertex, vertex)
graph[vertex][vertex][weight_column] = weighted_degree / (vertex_count - 1)
return graph
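# Illustrative sketch (not part of the original module): in a two-node
# undirected graph with one edge of weight 3, each vertex has weighted
# degree 3, so its augmented self-loop weight becomes 3 / (2 - 1) == 3.0.
#
#     g = nx.Graph()
#     g.add_edge(1, 2, weight=3)
#     diagonal_augmentation(g)
#     assert g[1][1]['weight'] == 3.0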
def __scale_edge_weights__(weight, edge_count):
# This is meant to scale edge weights between 0 and 2
return (weight * 2) / (edge_count + 1)
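# Worked example of the scaling above: with edge_count == 3, ranks 1, 2
# and 3 map to (1 * 2) / 4 == 0.5, (2 * 2) / 4 == 1.0 and
# (3 * 2) / 4 == 1.5, i.e. strictly between 0 and 2 as intended.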
def rank_edges(
graph: nx.Graph,
weight_column: str = 'weight'
) -> nx.Graph:
"""
Ranks the edges of a networkx.classes.graph.Graph object according to the values associated to the
`weight_column` in the edge attributes
:param networkx.Graph graph: The graph we will rank the edges in. MUST contain an attribute that corresponds to
`weight_column` (default value: `weight`)
:param str weight_column: edge attribute that contains the weight value. Default is `weight`
:return: Updated graph with new weights between 0 and 2, exclusive. Based on scipy.stats rankdata function.
:rtype: networkx.Graph
    :raise UnweightedGraphException: if the graph is not weighted by the provided `weight_column`
:raise TypeError: If the `graph` provided is not an `nx.Graph`
:examples:
>>> g = nx.Graph()
>>> g.add_edge("1", "2", weight=3)
>>> g.add_edge("2", "3", weight=4)
>>> g.add_edge("4", "5", weight=2)
>>> g = rank_edges(g)
>>> g.edges(data=True) #doctest: +NORMALIZE_WHITESPACE
EdgeDataView([('1', '2', {'weight': 1.0}),
('2', '3', {'weight': 1.5}),
('4', '5', {'weight': 0.5})])
"""
assertions.assert_is_weighted_graph(graph, weight_column)
edge_count = len(graph.edges())
edge_data = graph.edges(data=True)
edges = np.array(
list(
map(
lambda x: x[2][weight_column],
edge_data
)
),
np.float64
)
ranked_values = rankdata(edges)
    for i, (source, target, data) in enumerate(edge_data):
        data[weight_column] = __scale_edge_weights__(ranked_values[i], edge_count)
return graph
|
Python
|
CL
|
dba6a18701f2e9ba70f2f9130ef95acebabb8f9917ddf91013b109de6c309451
|
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of metalibm (https://github.com/kalray/metalibm)
###############################################################################
# MIT License
#
# Copyright (c) 2018 Kalray
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
###############################################################################
# Description: optimization pass to expand multi-precision node to simple
# precision implementation
###############################################################################
import sollya
from metalibm_core.core.passes import OptreeOptimization, Pass, LOG_PASS_INFO
from metalibm_core.opt.node_transformation import Pass_NodeTransformation
from metalibm_core.opt.opt_utils import forward_attributes
from metalibm_core.core.ml_formats import (
ML_Bool,
ML_FP_MultiElementFormat,
ML_Binary32, ML_Binary64,
ML_SingleSingle,
ML_DoubleDouble, ML_TripleDouble
)
from metalibm_core.core.ml_operations import (
Abs, Addition, Comparison, Equal, LogicalAnd, LogicalOr, Select, Subnormalize, Subtraction, Multiplication,
FusedMultiplyAdd,
Conversion, Negation,
Constant, TableLoad, Test, Variable,
BuildFromComponent, ComponentSelection,
Max, Min,
is_leaf_node,
)
from metalibm_core.opt.ml_blocks import (
Add222, Add122, Add221, Add212,
    Add121, Add112,
Add211,
Mul212, Mul221, Mul211, Mul222,
Mul122, Mul121, Mul112,
MP_FMA2111, MP_FMA2112, MP_FMA2122, MP_FMA2212, MP_FMA2121, MP_FMA2211,
MP_FMA2222,
MP_Add323, MP_Add332, MP_Add333,
MP_Mul322, MP_Mul332, MP_Mul323,
subnormalize_multi,
Normalize_33,
)
from metalibm_core.utility.log_report import Log
# high verbosity log-level for expand_multi_precision pass module
LOG_LEVEL_EXPAND_VERBOSE = Log.LogLevel("ExpandVerbose")
def is_subnormalize_op(node):
""" test if @p node is a Subnormalize operation """
return isinstance(node, Subnormalize)
def get_elementary_precision(multi_precision):
""" return the elementary precision corresponding
to multi_precision """
multi_precision = multi_precision.get_match_format()
if isinstance(multi_precision, ML_FP_MultiElementFormat):
return multi_precision.field_format_list[0]
else:
return multi_precision
def is_multi_precision_format(precision):
""" check if precision is a multi-element FP format """
if precision is None:
return False
return isinstance(precision.get_match_format(), ML_FP_MultiElementFormat)
def multi_element_output(node):
""" return True if node's output format is a multi-precision type """
return is_multi_precision_format(node.precision)
def multi_element_inputs(node):
""" return True if any of node's input has a multi-precision type """
return not is_leaf_node(node) and any(is_multi_precision_format(op_input.precision) for op_input in node.get_inputs())
def has_component_selection_input(node):
""" Check if any of node's input is a ComponentSelection node """
return not is_leaf_node(node) and any(isinstance(op, ComponentSelection) for op in node.get_inputs())
class MultiPrecisionExpander:
def __init__(self, target):
self.target = target
self.memoization_map = {}
def expand_cst(self, cst_node):
""" Expand a Constant node in multi-precision format into a list
of Constants node in scalar format and returns the list """
cst_multiformat = cst_node.precision
cst_value = cst_node.get_value()
cst_list = []
for elt_format in cst_multiformat.field_format_list:
cst_sub_value = elt_format.round_sollya_object(cst_value)
cst_list.append(Constant(cst_sub_value, precision=elt_format))
# updating cst_value
cst_value -= cst_sub_value
return tuple(cst_list)
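    # Illustrative sketch (not from the original source): for a constant c
    # of format ML_DoubleDouble, the loop above yields
    # hi = round(c) and lo = round(c - hi), both in ML_Binary64, so the
    # expanded limbs sum back to c within double-double accuracy.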
def expand_var(self, var_node):
""" Expand a variable in multi-precision format into
a list of ComponentSelection nodes """
var_multiformat = var_node.precision.get_match_format()
if len(var_multiformat.field_format_list) == 2:
return (var_node.hi, var_node.lo)
elif len(var_multiformat.field_format_list) == 3:
return (var_node.hi, var_node.me, var_node.lo)
else:
return tuple([
ComponentSelection(
var_node,
precision=elt_format,
specifier=ComponentSelection.Field(index)
) for index, elt_format in enumerate(var_multiformat.field_format_list)
])
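    # Illustrative summary: a 2-limb variable x expands to (x.hi, x.lo)
    # and a 3-limb variable to (x.hi, x.me, x.lo); only wider formats fall
    # back to building explicit ComponentSelection nodes per field.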
def tag_expansion(self, node, expansion):
""" set tags to element of list @p expansion
            which were derived from @p node """
suffix_list = {
1: ["_hi"],
2: ["_hi", "_lo"],
3: ["_hi", "_me", "_lo"]
}
expansion_len = len(expansion)
node_tag = node.get_tag()
tag_prefix = "" if node_tag is None else node_tag
if expansion_len in suffix_list:
for elt, suffix in zip(expansion, suffix_list[expansion_len]):
elt.set_tag(tag_prefix + suffix)
else:
for index, elt in enumerate(expansion):
elt.set_tag("{}_s{}".format(node_tag, expansion_len - 1 - index))
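    # Example (derived from the suffix table above): a 2-limb expansion of
    # a node tagged "t" yields limbs tagged "t_hi" and "t_lo"; a 4-limb
    # expansion falls back to indexed suffixes "t_s3", "t_s2", "t_s1",
    # "t_s0".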
def expand_op(self, node, expander_map, arity=2):
""" Generic expansion method for 2-operand node """
operands = [node.get_input(i) for i in range(arity)]
def wrap_expand(op):
""" expand node and returns it if no modification occurs """
expanded_node = self.expand_node(op)
return (op,) if expanded_node is None else expanded_node
operands_expansion = [list(wrap_expand(op)) for op in operands]
operands_format = [op.precision.get_match_format() for op in operands]
result_precision = node.precision.get_match_format()
elt_precision = get_elementary_precision(result_precision)
try:
expansion_key = (result_precision, tuple(operands_format))
expander = expander_map[expansion_key]
except KeyError:
Log.report(
Log.Error,
"unable to find multi-precision expander for {}, key is {}",
node, str(expansion_key))
new_op = expander(*(sum(operands_expansion, [])), precision=elt_precision)
# setting dedicated name to expanded node
self.tag_expansion(node, new_op)
# forward other attributes
for elt in new_op:
elt.set_debug(node.get_debug())
elt.set_handle(node.get_handle())
return new_op
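    # Illustrative sketch (not from the original source): expanding a + b
    # with a: ML_DoubleDouble and b: ML_Binary64 into an ML_DoubleDouble
    # result looks up the key (ML_DoubleDouble, (ML_DoubleDouble,
    # ML_Binary64)), finds Add221, and calls
    # Add221(a_hi, a_lo, b, precision=ML_Binary64).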
def expand_add(self, add_node):
""" Expand Addition """
ADD_EXPANSION_MAP = {
# double precision based formats
(ML_DoubleDouble, (ML_Binary64, ML_Binary64)): Add211,
(ML_DoubleDouble, (ML_DoubleDouble, ML_Binary64)): Add221,
(ML_DoubleDouble, (ML_Binary64, ML_DoubleDouble)): Add212,
(ML_DoubleDouble, (ML_DoubleDouble, ML_DoubleDouble)): Add222,
(ML_Binary64, (ML_DoubleDouble, ML_Binary64)): Add121,
(ML_Binary64, (ML_Binary64, ML_DoubleDouble)): Add112,
(ML_Binary64, (ML_DoubleDouble, ML_DoubleDouble)): Add122,
(ML_TripleDouble, (ML_TripleDouble, ML_TripleDouble)): MP_Add333,
(ML_TripleDouble, (ML_TripleDouble, ML_DoubleDouble)): MP_Add332,
(ML_TripleDouble, (ML_DoubleDouble, ML_TripleDouble)): MP_Add323,
# single precision based formats
(ML_SingleSingle, (ML_Binary32, ML_Binary32)): Add211,
(ML_SingleSingle, (ML_SingleSingle, ML_Binary32)): Add221,
(ML_SingleSingle, (ML_Binary32, ML_SingleSingle)): Add212,
(ML_SingleSingle, (ML_SingleSingle, ML_SingleSingle)): Add222,
(ML_Binary32, (ML_SingleSingle, ML_Binary32)): Add121,
(ML_Binary32, (ML_Binary32, ML_SingleSingle)): Add112,
(ML_Binary32, (ML_SingleSingle, ML_SingleSingle)): Add122,
}
return self.expand_op(add_node, ADD_EXPANSION_MAP, arity=2)
def expand_abs(self, absNode):
""" legalize a 2-elt multi-precision Abs node """
# TODO/FIXME: factorize with multi_precision.legalize_mp_2elt_abs
operand = absNode.get_input(0)
new_ops = self.expand_node(operand)
predicate = Comparison(new_ops[0], Constant(0, precision=new_ops[0].get_precision()), specifier=Comparison.GreaterOrEqual, precision=ML_Bool)
return [Abs(new_ops[0], precision=new_ops[0].precision)] + [Select(predicate, op, Negation(op), precision=op.get_precision()) for op in new_ops[1:]]
def expand_mul(self, mul_node):
""" Expand Multiplication """
MUL_EXPANSION_MAP = {
# double precision based formats
(ML_DoubleDouble, (ML_DoubleDouble, ML_DoubleDouble)): Mul222,
(ML_DoubleDouble, (ML_Binary64, ML_DoubleDouble)): Mul212,
(ML_DoubleDouble, (ML_DoubleDouble, ML_Binary64)): Mul221,
(ML_DoubleDouble, (ML_Binary64, ML_Binary64)): Mul211,
(ML_Binary64, (ML_DoubleDouble, ML_DoubleDouble)): Mul122,
(ML_Binary64, (ML_DoubleDouble, ML_Binary64)): Mul121,
(ML_Binary64, (ML_Binary64, ML_DoubleDouble)): Mul112,
(ML_TripleDouble, (ML_DoubleDouble, ML_DoubleDouble)): MP_Mul322,
(ML_TripleDouble, (ML_TripleDouble, ML_DoubleDouble)): MP_Mul332,
(ML_TripleDouble, (ML_DoubleDouble, ML_TripleDouble)): MP_Mul323,
# single precision based formats
(ML_SingleSingle, (ML_SingleSingle, ML_SingleSingle)): Mul222,
(ML_SingleSingle, (ML_Binary32, ML_SingleSingle)): Mul212,
(ML_SingleSingle, (ML_SingleSingle, ML_Binary32)): Mul221,
(ML_SingleSingle, (ML_Binary32, ML_Binary32)): Mul211,
(ML_Binary32, (ML_SingleSingle, ML_SingleSingle)): Mul122,
(ML_Binary32, (ML_SingleSingle, ML_Binary32)): Mul121,
(ML_Binary32, (ML_Binary32, ML_SingleSingle)): Mul112,
}
return self.expand_op(mul_node, MUL_EXPANSION_MAP, arity=2)
def expand_fma(self, fma_node):
""" Expand Fused-Multiply Add """
FMA_EXPANSION_MAP = {
# double precision based formats
(ML_DoubleDouble, (ML_DoubleDouble, ML_DoubleDouble, ML_DoubleDouble)): MP_FMA2222,
(ML_DoubleDouble, (ML_DoubleDouble, ML_Binary64, ML_DoubleDouble)): MP_FMA2212,
(ML_DoubleDouble, (ML_Binary64, ML_DoubleDouble, ML_DoubleDouble)): MP_FMA2122,
(ML_DoubleDouble, (ML_Binary64, ML_Binary64, ML_DoubleDouble)): MP_FMA2112,
(ML_DoubleDouble, (ML_Binary64, ML_DoubleDouble, ML_Binary64)): MP_FMA2121,
(ML_DoubleDouble, (ML_DoubleDouble, ML_Binary64, ML_Binary64)): MP_FMA2211,
(ML_DoubleDouble, (ML_Binary64, ML_Binary64, ML_Binary64)): MP_FMA2111,
# single precision based formats
(ML_SingleSingle, (ML_SingleSingle, ML_SingleSingle, ML_SingleSingle)): MP_FMA2222,
(ML_SingleSingle, (ML_SingleSingle, ML_Binary32, ML_SingleSingle)): MP_FMA2212,
(ML_SingleSingle, (ML_Binary32, ML_SingleSingle, ML_SingleSingle)): MP_FMA2122,
(ML_SingleSingle, (ML_Binary32, ML_Binary32, ML_SingleSingle)): MP_FMA2112,
(ML_SingleSingle, (ML_Binary32, ML_SingleSingle, ML_Binary32)): MP_FMA2121,
(ML_SingleSingle, (ML_SingleSingle, ML_Binary32, ML_Binary32)): MP_FMA2211,
(ML_SingleSingle, (ML_Binary32, ML_Binary32, ML_Binary32)): MP_FMA2111,
}
return self.expand_op(fma_node, FMA_EXPANSION_MAP, arity=3)
def expand_subnormalize(self, sub_node):
""" Expand Subnormalize on multi-component node """
operand = sub_node.get_input(0)
factor = sub_node.get_input(1)
exp_operand = self.expand_node(operand)
elt_precision = get_elementary_precision(sub_node.precision)
return subnormalize_multi(exp_operand, factor, precision=elt_precision)
def expand_negation(self, neg_node):
""" Expand Negation on multi-component node """
op_input = neg_node.get_input(0)
neg_operands = self.expand_node(op_input)
if neg_operands is None:
# input could not be expanded
if multi_element_output(op_input):
neg_operands = self.expand_var(op_input)
Log.report(LOG_LEVEL_EXPAND_VERBOSE, "expanding Negation {} into {}", neg_node, neg_operands)
return [Negation(op, precision=op.precision) for op in neg_operands]
def expand_select(self, selectNode):
""" Expand Select on multi-component node """
pred = selectNode.get_input(0)
lhs = selectNode.get_input(1)
rhs = selectNode.get_input(2)
expandedLhs = self.expand_node(lhs)
expandedRhs = self.expand_node(rhs)
        assert expandedLhs is not None
        assert expandedRhs is not None
return [Select(pred, a, b, precision=a.get_precision()) for (a, b) in zip(expandedLhs, expandedRhs)]
def expand_max(self, maxNode):
""" Expand Max on multi-component node """
lhs = maxNode.get_input(0)
rhs = maxNode.get_input(1)
expandedLhs = self.expand_node(lhs)
expandedRhs = self.expand_node(rhs)
        assert expandedLhs is not None
        assert expandedRhs is not None
predGt = Comparison(expandedLhs[0], expandedRhs[0], precision=ML_Bool, specifier=Comparison.Greater)
predEq = Equal(expandedLhs[0], expandedRhs[0], precision=ML_Bool)
for (a, b) in zip(expandedLhs[1:], expandedRhs[1:]):
predGt = LogicalOr(predGt, LogicalAnd(predEq, Comparison(a, b, specifier=Comparison.Greater, precision=ML_Bool)))
predEq = LogicalAnd(predEq, Equal(a, b, precision=ML_Bool))
return [Select(predGt, a, b, precision=a.get_precision()) for (a, b) in zip(expandedLhs, expandedRhs)]
def legalize_test_nan(self, selectNode):
""" Expand Test.IsNaN on multi-component node """
op = selectNode.get_input(0)
expOps = self.expand_node(op)
        assert expOps is not None
pred = Test(expOps[0], specifier=Test.IsNaN, precision=ML_Bool)
for limb in expOps:
pred = LogicalOr(pred, Test(limb, specifier=Test.IsNaN, precision=ML_Bool), precision=ML_Bool)
return pred
def expand_min(self, minNode):
""" Expand Min on multi-component node """
lhs = minNode.get_input(0)
rhs = minNode.get_input(1)
expandedLhs = self.expand_node(lhs)
expandedRhs = self.expand_node(rhs)
        assert expandedLhs is not None
        assert expandedRhs is not None
predGt = Comparison(expandedLhs[0], expandedRhs[0], precision=ML_Bool, specifier=Comparison.Greater)
predEq = Equal(expandedLhs[0], expandedRhs[0], precision=ML_Bool)
for (a, b) in zip(expandedLhs[1:], expandedRhs[1:]):
predGt = LogicalOr(predGt, LogicalAnd(predEq, Comparison(a, b, specifier=Comparison.Greater, precision=ML_Bool)))
predEq = LogicalAnd(predEq, Equal(a, b, precision=ML_Bool))
return [Select(predGt, b, a, precision=a.get_precision()) for (a, b) in zip(expandedLhs, expandedRhs)]
    def is_expandable(self, node):
        """ Returns True if @p node can be expanded from a multi-precision
            node to a list of scalar-precision fields,
            returns False otherwise """
expandable = ((multi_element_output(node) or multi_element_inputs(node))) and \
(isinstance(node, Addition) or \
isinstance(node, Multiplication) or \
isinstance(node, Subtraction) or \
isinstance(node, Conversion) or \
isinstance(node, FusedMultiplyAdd) or \
isinstance(node, Negation) or \
isinstance(node, BuildFromComponent) or \
isinstance(node, ComponentSelection) or \
isinstance(node, Select) or \
isinstance(node, Max) or \
isinstance(node, Min) or \
is_subnormalize_op(node))
if not expandable:
Log.report(LOG_LEVEL_EXPAND_VERBOSE, "{} cannot be expanded", node)
return expandable
def expand_conversion(self, node):
""" Expand Conversion node """
# optimizing Conversion
op_input = self.expand_node(node.get_input(0))
        if op_input is not None:
if op_input[0].precision == node.precision:
# if the conversion is from a multi-precision node to a result whose
# precision matches the multi-precision high component, then directly
# returns it
# TODO/FIXME: does not take into account possible overlap between
# limbs
Log.report(LOG_LEVEL_EXPAND_VERBOSE, "expanding conversion {} into {}", node, op_input[0])
return [op_input[0]]
elif is_multi_precision_format(node.precision) and \
node.precision.limb_num >= len(op_input) and \
all(op.precision == limb_prec for op, limb_prec in zip(op_input, node.precision.field_format_list)):
# if the conversion is from a multi-element format to a larger multi-element format
# just pad the input with 0
pad_size = node.precision.limb_num - len(op_input)
return op_input + tuple(Constant(0, precision=node.precision.get_limb_precision(len(op_input) + i)) for i in range(pad_size))
elif is_multi_precision_format(node.precision) and \
node.precision.limb_num < len(op_input) and \
all(op.precision == limb_prec for op, limb_prec in zip(op_input, node.precision.field_format_list)):
# if the conversion if from a multi-element format to a smaller
# multi-element format than insert a normalization and
# return the appropriate limb
assert node.precision.limb_num == 2 and len(op_input) == 3
normalized_op = Normalize_33(*op_input, precision=op_input[0].precision)
Log.report(LOG_LEVEL_EXPAND_VERBOSE, "expanding conversion {} into {}, {}", node, normalized_op[0], normalized_op[1])
return normalized_op[0], normalized_op[1]
return None
def expand_sub(self, node):
lhs = node.get_input(0)
rhs = node.get_input(1)
tag = node.get_tag()
precision = node.get_precision()
# Subtraction x - y is transformed into x + (-y)
# WARNING: if y is not expandable (e.g. scalar precision)
# this could stop expansion
new_node = Addition(
lhs,
Negation(
rhs,
precision=rhs.precision
),
precision=precision
)
forward_attributes(node, new_node)
expanded_node = self.expand_node(new_node)
Log.report(LOG_LEVEL_EXPAND_VERBOSE, "expanding Subtraction {} into {} with expanded form {}", node, new_node, ", ".join((op.get_str(display_precision=True, depth=None)) for op in expanded_node))
return expanded_node
def expand_build_from_component(self, node):
op_list = ((self.expand_node(op), op) for op in node.get_inputs())
result = tuple(op if expanded is None else expanded for (op, expanded) in op_list)
Log.report(LOG_LEVEL_EXPAND_VERBOSE, "expanding BuildFromComponent {} into {}", node, result)
return result
def expand_component_selection(self, node):
# TODO: manage TD normalization properly
if is_leaf_node(node.get_input(0)) or isinstance(node.get_input(0), TableLoad):
# discard expansion of leaf nodes (Variable, ...)
return None
op_list = self.expand_node(node.get_input(0))
OP_INDEX_MAP = {
ComponentSelection.Hi: 0,
ComponentSelection.Me: -2,
ComponentSelection.Lo: -1
}
op_index = OP_INDEX_MAP[node.specifier]
result = op_list[op_index]
Log.report(LOG_LEVEL_EXPAND_VERBOSE, "expanding ComponentSelection {} into {}", node, result)
return (result,)
def reconstruct_from_transformed(self, node, transformed_node):
Log.report(LOG_LEVEL_EXPAND_VERBOSE, "reconstructed : {}", node)
Log.report(
LOG_LEVEL_EXPAND_VERBOSE,
"from transformed: {}", "\n".join([str(n) for n in transformed_node]))
if isinstance(node, Constant):
result = node
else:
if len(transformed_node) == 1:
result = transformed_node[0]
else:
result = BuildFromComponent(*tuple(transformed_node), precision=node.precision)
forward_attributes(node, result)
result.set_tag(node.get_tag())
Log.report(LOG_LEVEL_EXPAND_VERBOSE, " result is : {}", result)
return result
    def expand_node(self, node):
        """ If node @p node is a multi-precision node, expand it to a list
            of scalar elements, ordered from most to least significant """
if node in self.memoization_map:
return self.memoization_map[node]
else:
if not (multi_element_output(node) or multi_element_inputs(node)):
if not is_leaf_node(node):
# recursive processing of node's input
for index, op in enumerate(node.get_inputs()):
op_input = self.expand_node(op)
                        if op_input is not None:
reconstructed_input = self.reconstruct_from_transformed(op, op_input)
node.set_input(index, reconstructed_input)
result = (node,)
elif isinstance(node, Variable):
result = self.expand_var(node)
elif isinstance(node, Constant):
result = self.expand_cst(node)
elif isinstance(node, Addition):
result = self.expand_add(node)
elif isinstance(node, Abs):
result = self.expand_abs(node)
elif isinstance(node, Multiplication):
result = self.expand_mul(node)
elif isinstance(node, Subtraction):
result = self.expand_sub(node)
elif isinstance(node, FusedMultiplyAdd):
result = self.expand_fma(node)
elif isinstance(node, Conversion):
result = self.expand_conversion(node)
elif isinstance(node, Negation):
result = self.expand_negation(node)
elif isinstance(node, Select):
result = self.expand_select(node)
elif isinstance(node, Max):
result = self.expand_max(node)
elif isinstance(node, Min):
result = self.expand_min(node)
#elif isinstance(node, Test) and node.specifier == Test.IsNaN:
# result = self.legalize_test_nan(node)
elif isinstance(node, BuildFromComponent):
result = self.expand_build_from_component(node)
elif isinstance(node, ComponentSelection):
result = self.expand_component_selection(node)
elif is_subnormalize_op(node):
result = self.expand_subnormalize(node)
else:
if is_leaf_node(node):
pass
else:
# recursive processing of node's input
for index, op in enumerate(node.get_inputs()):
op_input = self.expand_node(op)
                        if op_input is not None:
reconstructed_input = self.reconstruct_from_transformed(op, op_input)
node.set_input(index, reconstructed_input)
# no modification
result = None
if result is None:
Log.report(LOG_LEVEL_EXPAND_VERBOSE, "expansion is None for {}", node)
self.memoization_map[node] = result
return result
def dirty_multi_node_expand(node, precision, mem_map=None, fma=True):
""" Dirty expand node into Hi and Lo part, storing
already processed temporary values in mem_map """
mem_map = mem_map or {}
if node in mem_map:
return mem_map[node]
elif isinstance(node, Constant):
value = node.get_value()
value_hi = sollya.round(value, precision.sollya_object, sollya.RN)
value_lo = sollya.round(value - value_hi, precision.sollya_object, sollya.RN)
ch = Constant(value_hi,
tag=node.get_tag() + "hi",
precision=precision)
cl = Constant(value_lo,
tag=node.get_tag() + "lo",
precision=precision
) if value_lo != 0 else None
if cl is None:
Log.report(Log.Info, "simplified constant")
result = ch, cl
mem_map[node] = result
return result
else:
# Case of Addition or Multiplication nodes:
# 1. retrieve inputs
# 2. dirty convert inputs recursively
# 3. forward to the right metamacro
assert isinstance(node, Addition) or isinstance(node, Multiplication)
lhs = node.get_input(0)
rhs = node.get_input(1)
op1h, op1l = dirty_multi_node_expand(lhs, precision, mem_map, fma)
op2h, op2l = dirty_multi_node_expand(rhs, precision, mem_map, fma)
if isinstance(node, Addition):
result = Add222(op1h, op1l, op2h, op2l) \
if op1l is not None and op2l is not None \
else Add212(op1h, op2h, op2l) \
if op1l is None and op2l is not None \
else Add212(op2h, op1h, op1l) \
if op2l is None and op1l is not None \
else Add211(op1h, op2h)
mem_map[node] = result
return result
elif isinstance(node, Multiplication):
result = Mul222(op1h, op1l, op2h, op2l, fma=fma) \
if op1l is not None and op2l is not None \
else Mul212(op1h, op2h, op2l, fma=fma) \
if op1l is None and op2l is not None \
else Mul212(op2h, op1h, op1l, fma=fma) \
if op2l is None and op1l is not None \
else Mul211(op1h, op2h, fma=fma)
mem_map[node] = result
return result
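# Hedged usage sketch (not from the original source): for a tree such as
# t = (a + b) * c whose leaves are Constants or entries pre-seeded in
# mem_map as (hi, lo) pairs, dirty_multi_node_expand(t, ML_Binary64)
# recursively combines the pairs through Add222/Mul222 (or the 2x1
# variants when a lo part is None) and memoizes every intermediate pair.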
class Pass_ExpandMultiPrecision(Pass_NodeTransformation):
""" Expand node working with Multi-Precision formats into
expanded operation sub-graph working only with single-word formats """
pass_tag = "expand_multi_precision"
def __init__(self, target):
OptreeOptimization.__init__(
self, "multi-precision expansion pass", target)
## memoization map for promoted optree
self.memoization_map = {}
self.expander = MultiPrecisionExpander(target)
    def can_be_transformed(self, node, *args):
        """ Returns True if @p node can be expanded from a multi-precision
            node to a list of scalar-precision fields,
            returns False otherwise """
return self.expander.is_expandable(node)
def transform_node(self, node, transformed_inputs, *args):
""" If node can be transformed returns the transformed node
else returns None """
return self.expander.expand_node(node)
def reconstruct_from_transformed(self, node, transformed_node):
"""return a node at the root of a transformation chain,
compatible with untransformed nodes """
return self.expander.reconstruct_from_transformed(node, transformed_node)
## standard Opt pass API
    def execute(self, optree):
        """ Implementation of the standard optimization pass API """
        return self.transform_graph(optree)
Log.report(LOG_PASS_INFO, "Registering expand_multi_precision pass")
# register pass
Pass.register(Pass_ExpandMultiPrecision)
|
Python
|
CL
|
78d1ebddcd7849708d31f792068e41bf5151663f4147bd38e68860698348c72c
|
# Generated by Django 2.2.7 on 2019-12-02 19:49
import django.contrib.auth.models
from django.db import migrations, models
import usuario.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('cliente', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Usuario',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('tipo', models.CharField(choices=[('ADMINISTRADOR', 'Administrador'), ('CLIENTE', 'Cliente')], max_length=15, verbose_name='Tipo do usuário')),
('nome', models.CharField(max_length=100, verbose_name='Nome')),
('email', models.EmailField(db_index=True, max_length=100, unique=True, verbose_name='Email')),
('is_staff', models.BooleanField(default=False, verbose_name='Permitir acesso como gerenciador do sistema')),
('is_active', models.BooleanField(default=True, help_text='Liberar acesso ao sistema', verbose_name='ativo')),
('slug', models.SlugField(blank=True, max_length=100, null=True, verbose_name='slug')),
('clientes', models.ManyToManyField(to='cliente.Cliente')),
],
options={
'verbose_name': 'usuário',
'verbose_name_plural': 'usuários',
'ordering': ['nome'],
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
('administradores_ativos', usuario.models.UsuarioAdministradoresAtivosManager()),
('clientes_ativos', usuario.models.UsuarioClientesAtivosManager()),
],
),
]
|
Python
|
CL
|
dd2349785badc5600123206a95b49770db85d01c3ec410c3e0234014884e25a9
|
import datetime
import uuid
import pytest
from flask import current_app
from AIPscan.models import File, FileType
from AIPscan.Reporter.report_aips_by_puid import get_format_string_from_puid
EXPECTED_CSV_ORIGINAL = (
b"AIP Name,UUID,Count,Size\r\nTest AIP,111111111111-1111-1111-11111111,1,1.0 kB\r\n"
)
EXPECTED_CSV_PRESERVATION = (
b"AIP Name,UUID,Count,Size\r\nTest AIP,111111111111-1111-1111-11111111,1,2.0 kB\r\n"
)
FILE_WITH_FORMAT_ONLY = File(
uuid=uuid.uuid4(),
name="test.txt",
size=12345,
aip_id=2,
file_type=FileType.original,
file_format="Plain Text File",
puid="x-fmt/111",
filepath="/path/to/file.txt",
date_created=datetime.datetime.now(),
checksum_type="md5",
checksum_value="anotherfakemd5",
)
FILE_WITH_FORMAT_AND_VERSION = File(
uuid=uuid.uuid4(),
name="test.pdf",
size=12345678,
aip_id=1,
file_type=FileType.preservation,
file_format="Acrobat PDF/A - Portable Document Format",
format_version="1b",
puid="fmt/354",
filepath="/path/to/test.pdf",
date_created=datetime.datetime.now(),
checksum_type="md5",
checksum_value="yetanotherfakemd5",
original_file_id=1,
)
FILE_WITH_NO_FORMAT = File(
uuid=uuid.uuid4(),
name="test.txt",
size=12345,
aip_id=2,
file_type=FileType.original,
file_format=None,
puid="x-fmt/111",
filepath="/path/to/file.txt",
date_created=datetime.datetime.now(),
checksum_type="md5",
checksum_value="anotherfakemd5",
)
@pytest.mark.parametrize(
"puid, mock_file, expected_return_value",
[
("x-fmt/111", FILE_WITH_FORMAT_ONLY, "Plain Text File"),
(
"fmt/354",
FILE_WITH_FORMAT_AND_VERSION,
"Acrobat PDF/A - Portable Document Format (1b)",
),
("x-fmt/111", FILE_WITH_NO_FORMAT, None),
("fmt/123", None, None),
],
)
def test_get_format_name_and_version_from_puid(
app_instance, mocker, puid, mock_file, expected_return_value
):
"""Test that helper function returns expected string or None."""
mock_get_file = mocker.patch("sqlalchemy.orm.query.Query.first")
mock_get_file.return_value = mock_file
assert expected_return_value == get_format_string_from_puid(puid)
@pytest.mark.parametrize(
"original_files",
[
# Original files.
("True"),
# Preservation files.
("False"),
],
)
def test_aips_by_puid(app_with_populated_files, original_files):
"""Test that report template renders."""
with current_app.test_client() as test_client:
response = test_client.get(
"/reporter/aips_by_puid/?amss_id=1&puid=fmt/353&original_files={}".format(
original_files
)
)
assert response.status_code == 200
@pytest.mark.parametrize(
"original_files,expected_csv",
[
# Original files.
("True", EXPECTED_CSV_ORIGINAL),
# Preservation files.
("False", EXPECTED_CSV_PRESERVATION),
],
)
def test_aips_by_puid_csv(app_with_populated_files, original_files, expected_csv):
"""Test CSV export."""
with current_app.test_client() as test_client:
response = test_client.get(
"/reporter/aips_by_puid/?amss_id=1&puid=fmt/353&original_files={}&csv=True".format(
original_files
)
)
assert response.status_code == 200
assert (
response.headers["Content-Disposition"]
== "attachment; filename=aips_by_puid_fmt/353.csv"
)
assert response.mimetype == "text/csv"
assert response.data == expected_csv
|
Python
|
CL
|
92a9c3f2dc0298d3b59852e0a2b714a3fcc8d27337a52833a1b4dd810729339e
|
# -*- coding: utf-8 -*-
'''
Created on Wed Aug 19 12:51:57 2015
@author: Innerfunk
'''
# Initialize
import json
import pandas as pd
import rawpi
import util
TOP_FOLDER = 'AP_ITEM_DATASET'
PATCHES = [str(p) for p in util.PATCHES]
QUEUE_DICT = util.QUEUE_DICT
REGIONS = util.REGIONS
REGION_DICT = util.REGION_DICT
REVERSE_DICT = util.REVERSE_REGION_DICT
ALL_SAVE_FILENAME = 'all_matches.csv'
FILTERED_SAVE_FILENAME = 'filtered_matches.csv'
GOOD_RESPONSE = 200
RATE_LIMIT_RESPONSE = 429
COLUMNS = ['match id', 'patch', 'queue', 'region']
KEY = 'PUT RIOT API KEY HERE'
rawpi.set_api_key(KEY)
# Define DataFrame Creation Method
def put_matches_in_dataframe(patch, queue_type, region):
filename = TOP_FOLDER + '/' + patch + '/' + queue_type + \
'/' + region + '.json'
print(filename)
with open(filename) as data_file:
data = json.loads(data_file.read())
matches_frame = pd.DataFrame(index=range(len(data)), columns=COLUMNS)
queue = QUEUE_DICT[queue_type]
matches_frame['match id'] = data
matches_frame['patch'] = patch
matches_frame['queue'] = queue
matches_frame['region'] = REGION_DICT[region]
return matches_frame
# Get All File into One Frame and Save
frame_list = []
for match_properties in [(patch, queue_type, region) for patch in PATCHES
for queue_type in QUEUE_DICT.keys()
for region in REGIONS]:
frame_list.append(put_matches_in_dataframe(*match_properties))
all_frames = pd.concat(frame_list, ignore_index=True)
# Filter Matches
region_frames = all_frames[all_frames['region'].isin([1, 2, 3, 4])]
region_ranked_frames = region_frames[region_frames['queue'] == 'ranked']
all_frames.to_csv(ALL_SAVE_FILENAME, index=False)
region_ranked_frames.to_csv(FILTERED_SAVE_FILENAME, index=False)
|
Python
|
CL
|
2668b3cce5737004d21921c658d9f997b61a3210da4caac82e44f4b8cadd2da8
|
#!/usr/bin/env python
""" Index PubChem Bioassay json files with Elasticsearch or MongoDB"""
from __future__ import print_function
import argparse
import gzip
import json
import os
import struct
import sys
import time
from zipfile import ZipFile
from nosqlbiosets.dbutils import DBconnection
# Document type name for Elasticsearch or collection name for MongoDB
DOCTYPE = "bioassay"
INDEX = "pubchem"
# Maximum size of uncompressed files that should be indexed
MaxEntrySize = 256*1024*1024
# Maximum total size of uncompressed files indexed
# before an Elasticsearch _refresh call (~equivalent of database commits)
MaxBulkSize = 4*MaxEntrySize
def getuncompressedsize(filename):
with open(filename, 'rb') as f:
return getuncompressedsize_(f)
def getuncompressedsize_(f):
    # The last four bytes of a gzip member hold ISIZE, the size of the
    # uncompressed input modulo 2**32 (RFC 1952).
    f.seek(-4, 2)
    return struct.unpack('I', f.read(4))[0]
# Read given bioassay json file, index using the index function specified
# If the input file is a folder then iterate over files in the folder
def read_and_index_pubchem_bioassays(infile, es, indexfunc):
print("Reading %s " % infile)
i = 0
t1 = time.time()
if os.path.isdir(infile):
for child in os.listdir(infile):
c = os.path.join(infile, child)
if child.endswith(".zip"):
read_and_index_pubchem_bioassays_zipfile(c, es, indexfunc)
else:
read_and_index_pubchem_bioassays_file(c, es, indexfunc)
i += 1
else:
if infile.endswith(".zip"):
read_and_index_pubchem_bioassays_zipfile(infile, es, indexfunc)
else:
read_and_index_pubchem_bioassays_file(infile, es, indexfunc)
i = 1
t2 = time.time()
print("-- %d files have been processed, in %dms"
% (i, (t2 - t1) * 1000))
return None
# Read given bioassays zip file, index using the index function specified
def read_and_index_pubchem_bioassays_zipfile(zipfile, dbc, indexf):
print("\nProcessing %s " % zipfile)
i = 0
r = 0
with ZipFile(zipfile) as myzip:
for fname in myzip.namelist():
aid = fname[fname.find('/')+1:fname.find(".json")]
with myzip.open(fname) as jfile:
# TODO: gzip.open() doesn't work with python2
f = gzip.open(jfile, 'rt') # read as text, input to json.load
r = index_bioassay(dbc, f, r, aid, indexf)
i += 1
return i
# Read given bioassay file, index using the index function specified
def read_and_index_pubchem_bioassays_file(infile, dbc, indexfunc):
if infile.endswith(".json.gz"):
print(getuncompressedsize(infile))
f = gzip.open(infile, 'rt')
elif infile.endswith(".json"):
f = open(infile, 'r')
else:
        print('Unsupported file extension: %s' % infile)
return
aid = infile[infile.rfind('/') + 1:infile.find(".json")]
r = index_bioassay(dbc, f, 0, aid, indexfunc)
return r
# Return given date in format YYYY-MM-DD
def update_date(date):
d = "{}-{:02}-{:02}".format(date["std"]["year"], date["std"]["month"],
date["std"]["day"])
del (date["std"])
return d
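# Worked example (illustrative, not from the original file):
#   update_date({"std": {"year": 2019, "month": 3, "day": 7}})
# returns "2019-03-07" and deletes the "std" key from the input dict in place.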
def update_dates(doc):
for data in doc["PC_AssaySubmit"]["data"]:
date = data["date"]
d = update_date(date)
data["date"] = d
db = doc["PC_AssaySubmit"]["assay"]["descr"]["aid_source"]["db"]
if "date" in db:
d = update_date(db["date"])
doc["PC_AssaySubmit"]["assay"]["descr"]["aid_source"]["db"]["date"] = d
return
def index_bioassay(es, f, r, aid_, indexf):
doc = json.load(f)
if f.tell() < MaxEntrySize:
update_dates(doc)
aid = doc['PC_AssaySubmit']['assay']['descr']['aid']['id']
if str(aid) != aid_:
print("File name and assay ids not same, please check '%s' vs '%s'"
% (aid, aid_))
return r
r = indexf(es, f, r, aid_, doc)
else:
print("Large entry: aid=%s filesize=%d max-entry-size=%d" %
(aid_, f.tell(), MaxEntrySize))
return r
def es_index_bioassay(dbc, f, r, aid, doc):
try:
if r > 0 and (r + f.tell() > MaxBulkSize):
print("r", end='', file=sys.stdout)
sys.stdout.flush()
# refresh/commit to avoid Elasticsearch out-of-memory errors
            dbc.es.indices.refresh(index=dbc.index)
            dbc.es.indices.clear_cache(index=dbc.index)
r = 0
print(".", end='', file=sys.stdout)
sys.stdout.flush()
docx = doc['PC_AssaySubmit']
dbc.es.index(index=dbc.index, doc_type=DOCTYPE,
id=aid, body=docx)
r += f.tell()
except Exception as e:
print(e)
return r
def mongodb_index_bioassay(dbc, f, r, aid, doc):
try:
if r > 0 and (r + f.tell() > MaxBulkSize):
print("r", end='', file=sys.stdout)
sys.stdout.flush()
r = 0
print(".", end='', file=sys.stdout)
sys.stdout.flush()
docx = doc['PC_AssaySubmit']
dbc.mdbi[DOCTYPE].update({"_id": aid}, docx, upsert=True)
r += f.tell()
except Exception as e:
print(e)
return r
def main(db, infile, index=INDEX, host=None, port=None):
if db == 'Elasticsearch':
d = os.path.dirname(os.path.abspath(__file__))
cfg = json.load(open(d + "/../../mappings/pubchem-bioassays.json", "r"))
dbc = DBconnection(db, index, host, port, recreateindex=True,
es_indexmappings=cfg["mappings"])
read_and_index_pubchem_bioassays(infile, dbc, es_index_bioassay)
dbc.es.indices.refresh(index=index)
else:
dbc = DBconnection(db, index, host, port)
read_and_index_pubchem_bioassays(infile, dbc,
mongodb_index_bioassay)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Index PubChem Bioassays json files'
' with Elasticsearch or MongoDB')
parser.add_argument('--infile', '--infolder',
help='Input file to index, or input folder with '
'zipped bioassay json files')
parser.add_argument('--index',
default=INDEX,
help='Name of Elasticsearch index or MongoDB database')
parser.add_argument('--host',
help='Elasticsearch/MongoDB server hostname')
parser.add_argument('--port',
help="Elasticsearch/MongoDB server port")
parser.add_argument('-db', '--db', default='Elasticsearch',
help="Database: 'Elasticsearch' or 'MongoDB'")
args = parser.parse_args()
main(args.db, args.infile, args.index, args.host, args.port)
|
Python
|
CL
|
eb758b780a1ab0f63b0fdae24d850f3bc16c38ee9cbd247473b039e6e9f7201b
|
# -*- coding: utf-8 -*-
"""
@author: alexyang
@contact: alex.yang0326@gmail.com
@file: keras_han_model.py
@time: 2019/2/8 13:22
@desc:
"""
from keras.models import Model
from keras.layers import Input, Embedding, Dense, Bidirectional, GRU, Masking, TimeDistributed
from models.keras_base_model import KerasBaseModel
from layers.attention import SelfAttention
class HAN(KerasBaseModel):
def __init__(self, config, **kwargs):
super(HAN, self).__init__(config, **kwargs)
def build(self):
input_text = Input(shape=(self.max_len, ))
sent_encoded = self.word_encoder()(input_text) # word encoder
sent_vector = SelfAttention(bias=True)(sent_encoded) # word attention
dense_layer = Dense(256, activation='relu')(sent_vector)
if self.config.loss_function == 'binary_crossentropy':
output = Dense(1, activation='sigmoid')(dense_layer)
else:
output = Dense(self.n_class, activation='softmax')(dense_layer)
model = Model(input_text, output)
model.compile(loss=self.config.loss_function, metrics=['acc'], optimizer=self.config.optimizer)
return model
def word_encoder(self):
input_words = Input(shape=(self.max_len,))
word_vectors = Embedding(input_dim=self.word_embeddings.shape[0], output_dim=self.word_embeddings.shape[1],
weights=[self.word_embeddings], mask_zero=True,
trainable=self.config.word_embed_trainable)(input_words)
sent_encoded = Bidirectional(GRU(self.config.rnn_units, return_sequences=True))(word_vectors)
return Model(input_words, sent_encoded)
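# Usage sketch (illustrative; KerasBaseModel is assumed to supply max_len,
# word_embeddings and n_class, while config carries loss_function, optimizer,
# rnn_units and word_embed_trainable as used above):
#   model = HAN(config).build()
#   model.fit(x_train, y_train, batch_size=32, epochs=5)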
|
Python
|
CL
|
2129a9a80af7af75197396ac39dcc5b6ae7cf1b5e36ef08c6ac1a5755e177929
|
def CpG(sequence, size=200):
    """
    The Sequence Manipulation Suite: CpG Islands
    Results for 1200 residue sequence "sample sequence" starting "taacatactt".
    CpG islands search using window size of 200.
    Range, value
    32 to 231, the y-value is 1.75 and the %GC content is 50.5
    33 to 232, the y-value is 1.75 and the %GC content is 50.5
    Gardiner-Garden M, Frommer M. J Mol Biol. 1987 Jul 20;196(2):261-82.
    """
    # Sketch of the Gardiner-Garden & Frommer criteria cited above (the original
    # body was a stub): slide a window of `size` residues and report 1-based
    # ranges whose GC content exceeds 50% and whose observed/expected CpG ratio
    # (the "y-value") exceeds 0.6.
    sequence = sequence.upper()
    islands = []
    for start in range(len(sequence) - size + 1):
        window = sequence[start:start + size]
        c, g, cg = window.count('C'), window.count('G'), window.count('CG')
        gc_content = 100.0 * (c + g) / size
        obs_exp = (cg * size) / (c * g) if c and g else 0.0
        if gc_content > 50.0 and obs_exp > 0.6:
            islands.append((start + 1, start + size, obs_exp, gc_content))
    return islands
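# Usage sketch (illustrative): print hits in the docstring's "Range, value" format.
#   for start, end, y, gc in CpG(sequence):
#       print("%d to %d, the y-value is %.2f and the %%GC content is %.1f"
#             % (start, end, y, gc))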
|
Python
|
CL
|
01918db2a3df953a5db0fb9f2f3b883b25526df3fef613cec11b340c9d3949b6
|
"""DECaLS"""
import numpy as np
from astropy import units, io, utils
from astropy.table import Table
from frb.surveys import dlsurvey
from frb.surveys import catalog_utils
# Dependencies
try:
from pyvo.dal import sia
except ImportError:
print("Warning: You need to install pyvo to retrieve DECaL images")
_svc = None
else:
_DEF_ACCESS_URL = "https://datalab.noao.edu/sia/des_dr1"
_DEF_ACCESS_URL = "https://datalab.noao.edu/sia/ls_dr7"
_svc = sia.SIAService(_DEF_ACCESS_URL)
# Define the Photometric data model for DECaL
photom = {}
photom['DECaL'] = {}
DECaL_bands = ['g', 'r', 'z', 'W1', 'W2', 'W3', 'W4']
for band in DECaL_bands:
if "W" not in band:
bandstr = 'DECaL_'+band
else:
bandstr = 'WISE_'+band
photom['DECaL'][bandstr] = 'mag_{:s}'.format(band.lower())
photom['DECaL'][bandstr+"_err"] = 'snr_{:s}'.format(band.lower())
photom['DECaL']['DECaL_ID'] = 'decals_id'
photom['DECaL']['ra'] = 'ra'
photom['DECaL']['dec'] = 'dec'
photom['DECaL']['DECaL_brick'] = 'brickid'
class DECaL_Survey(dlsurvey.DL_Survey):
"""
Class to handle queries on the DECaL survey
Child of DL_Survey which uses datalab to access NOAO
Args:
        coord (SkyCoord): Coordinate to survey around
radius (Angle): Search radius around the coordinate
"""
def __init__(self, coord, radius, **kwargs):
dlsurvey.DL_Survey.__init__(self, coord, radius, **kwargs)
self.survey = 'DECaL'
self.bands = ['g', 'r', 'z']
self.svc = _svc # sia.SIAService("https://datalab.noao.edu/sia/ls_dr7")
self.qc_profile = "default"
def get_catalog(self, query=None, query_fields=None, print_query=False,exclude_gaia=False,**kwargs):
"""
Grab a catalog of sources around the input coordinate to the search radius
Args:
            query: SQL query
            query_fields (list, optional): Over-ride list of items to query
            print_query (bool): Print the SQL query generated
            exclude_gaia (bool, optional): If the field 'gaia_pointsource' is present and is 1,
                remove those objects from the output catalog.
Returns:
astropy.table.Table: Catalog of sources returned
"""
# Query
main_cat = super(DECaL_Survey, self).get_catalog(query_fields=query_fields, print_query=print_query,**kwargs)
main_cat = Table(main_cat,masked=True)
#
for col in main_cat.colnames:
main_cat[col].mask = np.isnan(main_cat[col])
#Convert SNR to mag error values.
snr_cols = [colname for colname in main_cat.colnames if "snr" in colname]
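        # Why 2.5*log10(1 + 1/SNR): since m = -2.5*log10(F), a flux error of
        # sigma_F = F/SNR shifts the magnitude by 2.5*log10((F + sigma_F)/F),
        # i.e. 2.5*log10(1 + 1/SNR). (Derivation noted here for clarity.)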
for col in snr_cols:
main_cat[col].mask = main_cat[col]<0
main_cat[col] = 2.5*np.log10(1+1/main_cat[col])
main_cat = main_cat.filled(-99.0)
#Remove gaia objects if necessary
if exclude_gaia:
self.catalog = main_cat[main_cat['gaia_pointsource']==0]
else:
self.catalog = main_cat
        # Clean (applied to the catalog that will actually be returned)
        self.catalog = catalog_utils.clean_cat(self.catalog, photom['DECaL'])
        self.validate_catalog()
# Return
return self.catalog
def _parse_cat_band(self, band):
"""
Internal method to generate the bands for grabbing
a cutout image
Args:
band (str): Band desired
Returns:
list, list, str: Table columns, Column values, band string for cutout
"""
        if band == 'g':
            bandstr = "g DECam SDSS c0001 4720.0 1520.0"
        elif band == 'r':
            bandstr = "r DECam SDSS c0002 6415.0 1480.0"
        elif band == 'z':
            bandstr = "z DECam SDSS c0004 9260.0 1520.0"
        else:
            raise ValueError("Unknown band: {}".format(band))
table_cols = ['prodtype']
col_vals = ['image']
return table_cols, col_vals, bandstr
def _gen_cat_query(self,query_fields=None):
"""
Generate SQL query for catalog search
self.query is modified in place
Args:
query_fields (list): Override the default list for the SQL query
"""
if query_fields is None:
object_id_fields = ['decals_id','brick_primary','brickid','ra','dec','gaia_pointsource']
mag_fields = ['mag_g','mag_r','mag_z','mag_w1','mag_w2','mag_w3','mag_w4']
snr_fields = ['snr_g','snr_r','snr_z','snr_w1','snr_w2','snr_w3','snr_w4']
query_fields = object_id_fields+mag_fields+snr_fields
database = "ls_dr7.tractor"
self.query = dlsurvey._default_query_str(query_fields, database, self.coord, self.radius)
def _select_best_img(self,imgTable, verbose=True, timeout=120):
"""
Select the best band for a cutout
Args:
imgTable: Table of images
verbose (bool): Print status
timeout (int or float): How long to wait before timing out, in seconds
Returns:
HDU: header data unit for the downloaded image
"""
row = imgTable[0]
url = row['access_url'].decode()
if verbose:
print('downloading image...')
imagedat = io.fits.open(utils.data.download_file(
url,cache=True,show_progress=False,timeout=timeout))
return imagedat
|
Python
|
CL
|
8ddc56df8e6acb4883e6bba94b45abb7c0a8f612499ec11ad2e69a329532e349
|
import sys
import json
import lib.args
import lib.help
import lib.util
import lib.style
import lib.brayns
import lib.config
import lib.process
import traceback
try:
print(lib.style.box("SSCx portal movie maker version 0.1.0"))
args = lib.args.parse()
if "help" in args.flags:
lib.help.show()
sys.exit(0)
    if args.config is None:
raise ValueError("Configuration filename is missing!")
cfg = lib.config.parse(args.config, args.flags)
if "preview" in args.flags:
print(lib.style.info("No connection to Brayns: ", "preview mode!"))
brayns = None
else:
brayns = lib.brayns.Brayns(cfg)
if "test" in args.flags:
cfg["firstMovieToProcess"] = 0
lib.process.exec(cfg, brayns, args.flags)
else:
while lib.process.exec(cfg, brayns, args.flags):
lib.util.save_file_content(args.config, json.dumps(cfg, indent=4))
print(lib.style.info("Script has finished ", "successfuly!"))
except Exception as ex:
print(f"""
{lib.style.error(str(ex))}
Usage:
$ python3 {sys.argv[0]} config.json
$ python3 {sys.argv[0]} --help
""")
print("List of available flags:")
for flag in [
["debug", "Print out stack trace on errors."],
["help", "Get detailed info about the configuration file."],
["preview", "Skip the Brayns part and go to compositing only."],
["reset", "Reset \"firstMovieToProcess\" to 0"],
["test", "Process only the first movie."]
]:
(name, desc) = flag
print(lib.style.flag(f" --{name}", desc))
print()
if "--debug" in sys.argv:
print(lib.style.red(traceback.format_exc()))
print()
sys.exit(1)
|
Python
|
CL
|
bda0e2322e6bbe4994048f1bd1d1ba1aaf39d1604991fe5fe1404dd14a8b0ca0
|
"""
Copied from: https://github.com/jrieke/traingenerator
Update index.html from streamlit by
- adding tracking code for Google Analytics
- adding meta tags for search engines
- adding meta tags for social preview
WARNING: This changes your existing streamlit installation (specifically the file
static/index.html in streamlit's main folder). It should only be called once after
installation, so this file doesn't get cluttered!
The tag from Google Analytics (G-XXXXXXXXXX) has to be stored in an environment variable
GOOGLE_ANALYTICS_TAG (or in a .env file).
"""
import os
import sys
import streamlit as st
def replace_in_file(filename, oldvalue, newvalue):
    """Replace every occurrence of a string in a file, in place."""
# Read in the file
with open(filename, "r") as f:
filedata = f.read()
# Replace the target string
filedata = filedata.replace(oldvalue, newvalue)
# Write the file out again
with open(filename, "w") as f:
f.write(filedata)
# Find path to streamlit's index.html.
st_dir = os.path.dirname(st.__file__)
index_filename = os.path.join(st_dir, "static", "index.html")
# Insert tracking code for Google Analytics.
tag = os.getenv("GOOGLE_ANALYTICS_TAG")
if not tag:
print("No tag provided, analytics is deactivated")
sys.exit(1)
tracking_code = f"""<!-- Global site tag (gtag.js) - Google Analytics --><script async src="https://www.googletagmanager.com/gtag/js?id={tag}"></script><script>window.dataLayer = window.dataLayer || []; function gtag(){{dataLayer.push(arguments);}} gtag('js', new Date()); gtag('config', '{tag}');</script>"""
clarity_tag = os.getenv("CLARITY_TAG")
if clarity_tag:
# Add clarity tracking code
clarity_tracking_code = f"""
<script type="text/javascript">
(function(c,l,a,r,i,t,y){{
c[a]=c[a]||function(){{(c[a].q=c[a].q||[]).push(arguments)}};
t=l.createElement(r);t.async=1;t.src="https://www.clarity.ms/tag/"+i;
y=l.getElementsByTagName(r)[0];y.parentNode.insertBefore(t,y);
}})(window, document, "clarity", "script", "{clarity_tag}");
</script>
"""
tracking_code += clarity_tracking_code
size_before = os.stat(index_filename).st_size
replace_in_file(index_filename, "<head>", "<head>" + tracking_code)
size_after = os.stat(index_filename).st_size
# print("Inserted tracking code into:", index_filename)
# print("Size before:", size_before)
# print("Size after: ", size_after)
# Insert meta tags for search & social preview.
# Older info but good summary: https://css-tricks.com/essential-meta-tags-social-media/
# 2020 info: https://stackoverflow.com/questions/19778620/provide-an-image-for-whatsapp-link-sharing
META_TAGS = """
<!-- Meta tags for search engines -->
<meta name="description" content="Python functions with superpowers. Instantly deploy simple functions with REST API, UI, and more.">
<!-- Meta tags for social preview -->
<meta property="og:title" content="Opyrator Playground">
<meta property="og:description" content="Python functions with superpowers">
<meta property="og:url" content="https://github.com/ml-tooling/opyrator">
<meta property="og:site_name" content="Opyrator Playground">
<meta name="twitter:image:alt" content="Opyrator Playground">
"""
# <meta name="twitter:card" content="summary_large_image">
# <meta property="og:image" content="https://github.com/jrieke/traingenerator/raw/main/docs/assets/social-preview-tiny.png">
size_before = os.stat(index_filename).st_size
replace_in_file(index_filename, "<head>", "<head>" + META_TAGS)
size_after = os.stat(index_filename).st_size
# print("Inserted meta tags into:", index_filename)
# print("Size before:", size_before)
# print("Size after: ", size_after)
|
Python
|
CL
|
57a8e3e897bf8e631c4792e84621efc7549b698c1ec19ba88b5bc69fd9ff0464
|
from comet_ml import Experiment as ex
from deoxys.experiment import Experiment
from deoxys.utils import read_file
from deoxys.model import model_from_full_config
import matplotlib.pyplot as plt
from deoxys.loaders.architecture import BaseModelLoader
from tensorflow.keras.models import Model as KerasModel
from tensorflow.keras.layers import Input, concatenate
from tensorflow.keras.models import Sequential
from tensorflow import image
import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
import tensorboard
import numpy as np
import scipy.ndimage
import math
from deoxys.model.layers import layer_from_config
from deoxys.customize import custom_architecture
import os
import h5py
if __name__ == '__main__':
#ex_comet = ex(api_key="zoPcSaPo6mhKthsM8SOcgq9Uk",project_name="masterthesisafreen", workspace="afreen3010")
#
config = read_file('json/layer_32_filter_new.json')
experiment = Experiment()
#
#from pdb import set_trace; set_trace()
experiment.from_full_config(config).run_experiment(train_history_log=True,
model_checkpoint_period=10,epochs=100).plot_performance()
|
Python
|
CL
|
b26b895592b331d61a3692a879724ba5673001417fe6eca5b3e2225af4adb54a
|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from pathlib import Path
from model_navigator.converter import ConversionConfig, Converter
from model_navigator.model import Format, ModelConfig
from model_navigator.results import State
from model_navigator.utils import Workspace
def test_converter_return_src_model_if_it_matches_conversion_set():
with tempfile.TemporaryDirectory() as tmp_dir:
workspace_dir = Path(tmp_dir) / "navigator_workspace"
workspace = Workspace(workspace_dir)
converter = Converter(workspace=workspace, verbose=True)
        for model_path, target_format in [
            (Path("tests/files/models/identity.savedmodel"), Format.TF_SAVEDMODEL),
            (Path("tests/files/models/identity.onnx"), Format.ONNX),
            (Path("tests/files/models/identity.traced.pt"), Format.TORCHSCRIPT),
        ]:
            src_model = ModelConfig(model_name="MyModel", model_path=model_path)
            conversion_config = ConversionConfig(target_format=target_format)
            conversion_results = list(converter.convert(src_model=src_model, conversion_config=conversion_config))
            assert len(conversion_results) == 1
            result = conversion_results[0]
            assert result.status.state == State.SUCCEEDED, result.status.message
            assert result.output_model.path == src_model.model_path
|
Python
|
CL
|
d695fec782f1f4463847d18b352689db9b3f0475f86b5f12d98c53cc8fd34051
|
from rassh.commands.ssh_bonaire_command_base import SSHBonaireCommandBase
from rassh.datatypes.well_formed_command import WellFormedCommand
from rassh.commands import bonaire_functions
class SSHBonaireESSID(SSHBonaireCommandBase):
def __init__(self, expect_manager):
SSHBonaireCommandBase.__init__(self, expect_manager)
self.object_type = 'ssid_profile'
self.command_name = 'essid'
self.payload = ['essid', ]
def run_get_command(self, cmd: WellFormedCommand):
lines = self.expect_command(self.ssh_manager.master_controller,
"show wlan ssid-profile " + cmd.request_dict['target']
+ " | include ESSID", cmd)
essid = None
for line in lines:
if line.startswith("ESSID"):
essid = line.replace("ESSID", "", 1).strip()
if line.startswith("SSID Profile"):
# """SSID Profile "fake_ssid_profile" undefined."""
return "Error: SSID profile not found", 404
if not essid:
return "Error: could not parse ESSID", 500
return essid, 200
def run_put_command(self, cmd: WellFormedCommand):
length = len(cmd.request_dict['params']['essid'])
if length < 1 or length > 32:
return "Error: ESSID must be between 1 and 32 characters (check failed).", 501
_ = self.expect_command(self.ssh_manager.master_controller, "configure t", cmd)
_ = self.expect_command(self.ssh_manager.master_controller,
"wlan ssid-profile " + cmd.request_dict['target'], cmd)
lines = self.expect_command(self.ssh_manager.master_controller, 'essid "'
+ cmd.request_dict['params']['essid'] + '"', cmd)
for line in lines:
if line.startswith("% Invalid input detected at '^' marker."):
# You see this if attempting to set an ESSID which is too short or long (for example).
# This should have been handled by the earlier check, so 500 is appropriate.
bonaire_functions.end_end(self, cmd)
return "Server Error: ESSID must be between 1 and 32 characters (failed).", 500
bonaire_functions.end_end(self, cmd)
_ = self.expect_command(self.ssh_manager.master_controller, "write mem", cmd)
return cmd.request_dict['params']['essid'], 204
|
Python
|
CL
|
bce977ff23ef2d1c3554f78f9c11e84c88363bb71e109f65cf0eb824bb73971a
|
from pyspark.sql.types import *
from pyspark.sql.functions import *
from itertools import chain
import numpy as np
from pyspark.ml.feature import Imputer
from pyspark.sql import SparkSession, Window
# fill missing value using mean or median
def fill_missing(df,strategy='mean', missingValue=np.nan):
"""
Fill missing value using statistical methods like mean and median
parameters
----------
df: pyspark.sql.dataframe.DataFrame
strategy: str, default 'mean'
strategy will be used to impute missing values
    missingValue: str,int,float,bool,np.nan, default np.nan
        the value to treat as missing
returns
-------
imp: pyspark.ml.feature.Imputer
transformed df: pyspark.sql.dataframe.DataFrame
missing value imputed dataframe
"""
c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']
imp = Imputer(inputCols=c_name,outputCols=c_name,strategy=strategy, missingValue=missingValue).fit(df)
return imp,imp.transform(df)
# convert string to lower or upper case
def case_convertion(df,l):
"""
convert the str column to lower or upper case
parameters
----------
df: pyspark.sql.dataframe.DataFrame
    l: dict
        dictionary mapping column names to the target case; each value must be 'lower' or 'upper', e.g. {'ST_NAME': 'lower'}
return
------
pyspark.sql.dataframe.DataFrame
"""
for i,case in l.items():
if case=='lower':
df = df.withColumn(i,lower(col(i).alias(i)))
elif case=='upper':
df = df.withColumn(i,upper(col(i).alias(i)))
return df
# replace value in column
def replace(df,d,col_name,keep=False):
"""
replace value in dataframe column
parameter
---------
df: pyspark.sql.dataframe.DataFrame
    d: dict
        key-value pairs of old value to new value, e.g. {'Y': 1, 'N': 0}
col_name: str
column name in the data frame in which replacement needs to be done
    keep: bool, default False
        if False, values not present in the dictionary are replaced by null; if True they are kept unchanged
returns
-------
pyspark.sql.dataframe.DataFrame
"""
mapping = create_map([lit(x) for x in chain(*d.items())])
if keep:
df = df.withColumn(col_name,coalesce(mapping[df[col_name]],df[col_name]).alias(col_name))
else:
df = df.withColumn(col_name,mapping[df[col_name]].alias(col_name))
return df
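# Usage sketch (illustrative values): map 'Y'->1 and 'N'->0 in OWN_OCCUPIED,
# keeping any unmapped values as-is; with keep=False they would become null.
#   df = replace(df, {'Y': 1, 'N': 0}, 'OWN_OCCUPIED', keep=True)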
# change col type
def change_col_type(df,schema):
"""
Change the type of the column
parameters
----------
df: pyspark.sql.dataframe.DataFrame
schema: dict
key value pair of the column name and type of the column, the type of the columns must be 'int','str','float' or 'bool'
e.g. {'PID':'int','ST_NUM':'int','NUM_BEDROOMS':'int','NUM_BATH':'int','SQ_FT':'int','OWN_OCCUPIED':'int'}
returns
-------
pyspark.sql.dataframe.DataFrame
"""
d = {'int':IntegerType(),'str':StringType(),'float':FloatType(),'bool':BooleanType()}
for c,t in schema.items():
df = df.withColumn(c,col(c).cast(d[t]))
return df
# find outlier
def detect_outlier(df,method='iqr',val=np.nan,thresh=3):
    """
    detect outliers in the dataframe and replace them
    parameters
    ----------
    df: pyspark.sql.dataframe.DataFrame
    method: str, default 'iqr'
        method for outlier detection: 'z_score', 'iqr' or 'std'
    val: str,int,float,bool,np.nan, default np.nan
        value to be put in place of each outlier
    thresh: int or float, default 3
        cutoff used by the 'z_score' and 'std' methods (3 is a common choice)
    returns
    -------
    pyspark.sql.dataframe.DataFrame
    """
c_name = [n for n,d in df.dtypes if d != 'string' and d != 'boolean']
if method=='z_score':
for i in c_name:
stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()
m = stat[0]['mean']
s = stat[0]['std']
df = df.withColumn(i,when(abs((col(i)-m)/s)>thresh,val).otherwise(col(i)))
elif method=='iqr':
for i in c_name:
q1,q3 = df.approxQuantile(i,[0.25,0.75],0)
IQR = q3-q1
lo = q1-(1.5*IQR)
up = q3+(1.5*IQR)
df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))
elif method=='std':
for i in c_name:
stat = df.select(mean(col(i)).alias('mean'),stddev(col(i)).alias('std')).collect()
m = stat[0]['mean']
s = stat[0]['std']*thresh
lo = m - s
up = m + s
df = df.withColumn(i,when(col(i).between(lo,up), col(i)).otherwise(val))
return df
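# Usage sketch (illustrative): flag IQR outliers as nulls, then impute them
# with the median via fill_missing defined above.
#   imp, df = fill_missing(detect_outlier(df, method='iqr'), strategy='median')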
# split column by regular expression
def split_column(df,col_name,reg_ex=',',keep=False):
"""
split single column into multiple columns using regular expression
parameters
----------
df: pyspark.sql.dataframe.DataFrame
col_name: str
name of the column to be split in dataframe
reg_ex: str
        regular expression for splitting
keep: bool, default False
remove original column if set to False
returns
-------
pyspark.sql.dataframe.DataFrame
"""
# https://stackoverflow.com/a/51680292/5847441
df = df.select(col_name,posexplode(split(col_name,reg_ex)).alias('pos','val'))\
.select(col_name,concat(lit(col_name),col('pos').cast('string')).alias('name'),'val')\
.groupBy(col_name).pivot('name').agg(first('val'))
if keep:
return df
else:
return df.drop(col_name)
# trim space
def trim_space(df,col_name):
"""
trim extra space from starting and ending in the column
parameters
----------
df: pyspark.sql.dataframe.DataFrame
col_name: str
column name in the dataframe
returns
-------
pyspark.sql.dataframe.DataFrame
"""
return df.withColumn(col_name,trim(col_name))
# count of unique value in column
def get_counts(df,col_name):
"""
Give count of each unique value in the column of the dataframe
parameters
----------
df: pyspark.sql.dataframe.DataFrame
col_name: str
column name in the dataframe
returns
-------
pyspark.sql.dataframe.DataFrame
"""
    return df.groupBy(col_name).count()
# remove column
def remove_column(df,col_name):
"""
Remove columns from dataframe
parameters
----------
df: pyspark.sql.dataframe.DataFrame
col_name: str
column name in the dataframe
returns
-------
pyspark.sql.dataframe.DataFrame
"""
return df.drop(col_name)
# drop duplicates
def drop_dups(df,col_names=None):
    """
    drop duplicate rows from the dataframe
    parameters
    ----------
    df: pyspark.sql.dataframe.DataFrame
    col_names: list, optional
        subset of columns to consider; all columns are used when None
    returns
    -------
    pyspark.sql.dataframe.DataFrame
    """
    return df.dropDuplicates(subset=col_names)
# insert column
def add_column(df,col_name,use_func=False,func=None,data=None):
"""
    Add a new column to the dataframe
parameters
----------
df: pyspark.sql.dataframe.DataFrame
col_name: str
column name in the dataframe
    use_func: bool, default False
        if True, the new column is computed from the column expression given in func;
        otherwise the list or np.ndarray given in data is appended as the new column
func: function
It will be used with use_func argument, e.g. exp('SQ_FT'), log('SQ_FT')
data: list, np.ndarray
data to be filled in new column
returns
-------
pyspark.sql.dataframe.DataFrame
"""
if use_func:
df = df.withColumn(col_name,func)
else:
        if type(data)==np.ndarray:
            data = data.tolist()
        spark = SparkSession.builder.getOrCreate()
        a = spark.createDataFrame([(i,) for i in data],[col_name])
a = a.withColumn("row_idx", row_number().over(Window.orderBy(monotonically_increasing_id())))
df = df.withColumn("row_idx", row_number().over(Window.orderBy(monotonically_increasing_id())))
df = df.join(a,df.row_idx==a.row_idx).drop('row_idx')
return df
|
Python
|
CL
|
b6c03659cb1b043fdaf5eb7f84e361af065beda932c0fad86e92c6945e20d4e3
|
import os
from my_app import utils
from my_app.app import app
from flask import jsonify, request, make_response
@app.route('/')
def home():
"""Create a Flask backend API for Whatsapp
The Whatsapp API should contain the following views:
1. GET messages between user1 and user 2
2. POST a message from user1 to user2
3. PATCH Edit the previous message from the user.
4. Start chatting with a new user
"""
return "Welcome to the WhatsApp API"
@app.route('/sign_up', methods=['POST'])
def sign_up():
request_body = request.get_json()
# check inputs: format should be correct and user should not already exist
if 'username' not in request_body:
return make_response(jsonify(error="The body must contain 'username' for sign up."), 400)
username = request_body['username']
if utils.user_exists(username):
return make_response(jsonify(error=f"Username {username} already exists"), 400)
# create a new directory for the user
user_notes_folder = f'notes/{username}'
try:
os.makedirs(user_notes_folder)
return make_response(jsonify(message='ok'), 200)
except Exception as e:
print(f'error: {str(e)}')
return make_response(jsonify(error=str(e)), 500)
@app.route('/get-message/<string:username>', methods=['GET'])
def get_message(username):
    # check input format
    request_body = request.get_json()
    if not request_body or 'note_name' not in request_body:
        return make_response(jsonify(error="The body must contain 'note_name'."), 400)
    user_notes_folder = f'notes/{username}'
    note_name = request_body['note_name']
    try:
        with open(f'{user_notes_folder}/{note_name}.txt', 'r') as stored_file:
            note_content = stored_file.read()
        return make_response(jsonify(text=note_content), 200)
    except Exception as e:
        print(f'error: {str(e)}')
        return make_response(jsonify(error=str(e)), 500)
@app.route('/modify-message/<string:username>', methods=['PUT'])
def modify_message(username):
    # take the note name and new text as input
    request_body = request.get_json()
    if 'text' not in request_body or 'note_name' not in request_body:
        return make_response(jsonify(error="The body must contain 'note_name' and 'text' to modify a note."), 400)
    user_notes_folder = f'notes/{username}'
    note_name = request_body['note_name']
    text = request_body['text']
    # update the file stored locally
    try:
        with open(f'{user_notes_folder}/{note_name}.txt', 'w') as stored_file:
            stored_file.write(text)
        return make_response(jsonify(message='ok'), 200)
    except Exception as e:
        print(f'error: {str(e)}')
        return make_response(jsonify(error=str(e)), 500)
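# Usage sketch (illustrative; 'alice' and 'todo' are made-up values): exercising
# the endpoints with Flask's built-in test client.
#   with app.test_client() as client:
#       client.post('/sign_up', json={'username': 'alice'})
#       client.put('/modify-message/alice', json={'note_name': 'todo', 'text': 'hi'})
#       client.get('/get-message/alice', json={'note_name': 'todo'})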
|
Python
|
CL
|
cda68dd552ffe6c7971e17b965ce7b4ef00718325a7bbb6907fef3891c9ccb72
|
# The MIT License (MIT)
#
# Copyright (c) 2019 Looker Data Sciences, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""AuthToken
"""
from typing import Optional, Type, Union
import datetime
import attr
from looker_sdk.rtl import auth_session
from looker_sdk.rtl import model
# Same as the Looker API access token object
# Re-declared here to be independent of model generation
@attr.s(auto_attribs=True, init=False)
class AccessToken(model.Model):
"""
Attributes:
access_token: Access Token used for API calls
token_type: Type of Token
expires_in: Number of seconds before the token expires
refresh_token: Refresh token which can be used to obtain a new access token
"""
access_token: Optional[str] = None
token_type: Optional[str] = None
expires_in: Optional[int] = None
refresh_token: Optional[str] = None
def __init__(
self,
*,
access_token: Optional[str] = None,
token_type: Optional[str] = None,
expires_in: Optional[int] = None,
refresh_token: Optional[str] = None,
):
self.access_token = access_token
self.token_type = token_type
self.expires_in = expires_in
self.refresh_token = refresh_token
class AuthToken:
"""Used to instantiate or check expiry of an AccessToken object"""
def __init__(
self, token: Optional[AccessToken] = None,
):
self.lag_time = 10
self.access_token: str = ""
self.refresh_token: str = ""
self.token_type: str = ""
self.expires_in: int = 0
self.expires_at = datetime.datetime.now() + datetime.timedelta(
seconds=-self.lag_time
)
if token is None:
token = AccessToken()
self.set_token(token)
def set_token(self, token: AccessToken):
"""Assign the token and set its expiration."""
self.access_token = token.access_token or ""
if isinstance(token, AccessToken):
self.refresh_token = token.refresh_token or ""
self.token_type = token.token_type or ""
self.expires_in = token.expires_in or 0
lag = datetime.timedelta(seconds=-self.lag_time)
if token.access_token and token.expires_in:
lag = datetime.timedelta(seconds=token.expires_in - self.lag_time)
self.expires_at = datetime.datetime.now() + lag
@property
def is_active(self) -> bool:
"""True if authentication token has not timed out"""
if not self.expires_at:
return False
return self.expires_at > datetime.datetime.now()
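# Usage sketch (illustrative): a fresh token stays active until ~lag_time
# seconds before it expires; a default-constructed AuthToken is inactive.
#   auth = AuthToken(AccessToken(access_token="abc", token_type="Bearer", expires_in=3600))
#   assert auth.is_active
#   assert not AuthToken().is_active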
|
Python
|
CL
|
7511cd9b064f733619d4a85141b7f27bad2ba4684740df134a87e9acc3a31b70
|
import os
from setuptools import setup, find_packages
from openspending.version import __version__
PKG_ROOT = '.'
def files_in_pkgdir(pkg, dirname):
pkgdir = os.path.join(PKG_ROOT, *pkg.split('.'))
walkdir = os.path.join(pkgdir, dirname)
walkfiles = []
for dirpath, _, files in os.walk(walkdir):
fpaths = (os.path.relpath(os.path.join(dirpath, f), pkgdir)
for f in files)
walkfiles += fpaths
return walkfiles
def package_filter(pkg):
if pkg in ['openspending.test', 'openspending.test.helpers']:
return True
elif (pkg.startswith('openspending.test') or
pkg.startswith('openspending.ui.test')):
return False
else:
return True
setup(
name='openspending',
version=__version__,
description='OpenSpending',
author='Open Knowledge Foundation',
author_email='okfn-help at lists okfn org',
url='http://github.com/okfn/openspending',
install_requires=[
"WebOb==1.0.8", # Explicitly specify WebOb 1.0.8, as with 1.1
# integration with Pylons is broken:
# see https://gist.github.com/1214075
"Pylons==1.0",
"Genshi==0.6",
"pymongo==1.11",
"repoze.who==2.0b1",
"repoze.who-friendlyform==1.0.8",
"Unidecode==0.04.7",
"python-dateutil==1.5",
"solrpy==0.9.4",
"pyutilib.component.core==4.3.1",
"Babel==0.9.6",
"colander==0.9.3",
"distribute>=0.6.10",
"mock==0.7.2",
"sphinx==1.0.7",
"argparse==1.2.1"
],
setup_requires=[
"PasteScript==1.7.4.2",
"nose==1.1.2"
],
packages=filter(package_filter, find_packages()),
namespace_packages=['openspending', 'openspending.plugins'],
package_data={
'openspending.model': files_in_pkgdir('openspending.model', 'serverside_js'),
'openspending.ui': (
files_in_pkgdir('openspending.ui', 'public') +
files_in_pkgdir('openspending.ui', 'templates')
)
},
test_suite='nose.collector',
zip_safe=False,
paster_plugins=['PasteScript', 'Pylons'],
entry_points={
'paste.app_factory': [
'main = openspending.ui.config.middleware:make_app'
],
'paste.app_install': [
'main = pylons.util:PylonsInstaller'
],
'console_scripts': [
'ostool = openspending.command:main'
]
}
)
|
Python
|
CL
|
50f709a6d4c02b3ff579e87f7e3cc8011c7483b4e7f813f6a29917f017608c46
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This import verifies that the dependencies are available.
import uuid
from metadata.generated.schema.entity.data.database import Database
from metadata.generated.schema.entity.services.databaseService import DatabaseServiceType
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.api.common import IncludeFilterPattern
from metadata.ingestion.models.ometa_table_db import OMetaDatabaseAndTable
import pymysql # noqa: F401
from metadata.generated.schema.entity.data.table import Table, Column
from metadata.ingestion.source.sql_alchemy_helper import SQLAlchemyHelper, SQLSourceStatus
from .sql_source import SQLConnectionConfig
from metadata.ingestion.api.source import Source, SourceStatus
from metadata.ingestion.models.table_metadata import DatabaseMetadata
from itertools import groupby
from typing import Iterator, Union, Dict, Any, Iterable
from collections import namedtuple
from ..ometa.openmetadata_rest import MetadataServerConfig
from ...utils.helpers import get_database_service_or_create
TableKey = namedtuple('TableKey', ['schema', 'table_name'])
class PostgresSourceConfig(SQLConnectionConfig):
# defaults
scheme = "postgresql+psycopg2"
service_name = "postgres"
service_type = "POSTGRES"
def get_service_type(self) -> DatabaseServiceType:
return DatabaseServiceType[self.service_type]
def get_connection_url(self):
return super().get_connection_url()
def get_table_key(row: Dict[str, Any]) -> Union[TableKey, None]:
"""
Table key consists of schema and table name
:param row:
:return:
"""
if row:
return TableKey(schema=row['schema'], table_name=row['name'])
return None
class PostgresSource(Source):
SQL_STATEMENT = """
SELECT
c.table_catalog as cluster, c.table_schema as schema, c.table_name as name, pgtd.description as description
,c.column_name as col_name, c.data_type as col_type
, pgcd.description as col_description, ordinal_position as col_sort_order
FROM INFORMATION_SCHEMA.COLUMNS c
INNER JOIN
pg_catalog.pg_statio_all_tables as st on c.table_schema=st.schemaname and c.table_name=st.relname
LEFT JOIN
pg_catalog.pg_description pgcd on pgcd.objoid=st.relid and pgcd.objsubid=c.ordinal_position
LEFT JOIN
pg_catalog.pg_description pgtd on pgtd.objoid=st.relid and pgtd.objsubid=0
ORDER by cluster, schema, name, col_sort_order
"""
# CONFIG KEYS
WHERE_CLAUSE_SUFFIX_KEY = 'where_clause_suffix'
CLUSTER_KEY = 'cluster_key'
USE_CATALOG_AS_CLUSTER_NAME = 'use_catalog_as_cluster_name'
DATABASE_KEY = 'database_key'
SERVICE_TYPE = 'POSTGRES'
def __init__(self, config, metadata_config, ctx):
super().__init__(ctx)
self.sql_stmt = PostgresSource.SQL_STATEMENT
self.alchemy_helper = SQLAlchemyHelper(config, metadata_config, ctx, "Postgres", self.sql_stmt)
self._extract_iter: Union[None, Iterator] = None
self._database = 'postgres'
self.metadata_config = metadata_config
self.status = SQLSourceStatus()
self.service = get_database_service_or_create(config, metadata_config)
self.pattern = config
self.filter_pattern: IncludeFilterPattern = IncludeFilterPattern.allow_all()
@classmethod
def create(cls, config_dict, metadata_config_dict, ctx):
config = PostgresSourceConfig.parse_obj(config_dict)
metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)
return cls(config, metadata_config, ctx)
def prepare(self):
pass
def _get_raw_extract_iter(self) -> Iterable[Dict[str, Any]]:
"""
Provides iterator of result row from SQLAlchemy helper
:return:
"""
rows = self.alchemy_helper.execute_query()
for row in rows:
yield row
def next_record(self) -> Iterable[DatabaseMetadata]:
"""
Using itertools.groupby and raw level iterator, it groups to table and yields TableMetadata
:return:
"""
for key, group in groupby(self._get_raw_extract_iter(), get_table_key):
columns = []
for row in group:
last_row = row
                col_type_map = {
                    'CHARACTER VARYING': 'VARCHAR',
                    'CHARACTER': 'CHAR',
                    'NAME': 'CHAR',
                    'INTEGER': 'INT',
                    'TIMESTAMP WITHOUT TIME ZONE': 'TIMESTAMP',
                    'DOUBLE PRECISION': 'DOUBLE',
                    'OID': 'NUMBER',
                    'ARRAY': 'ARRAY',
                    'BOOLEAN': 'BOOLEAN',
                }
                col_type = col_type_map.get(row['col_type'].upper())
if not self.pattern.filter_pattern.included(f'{last_row[1]}.{last_row[2]}'):
self.status.filtered(f'{last_row[1]}.{last_row[2]}', "pattern not allowed", last_row[2])
continue
if col_type is not None:
columns.append(Column(name=row['col_name'], description=row['col_description'],
columnDataType=col_type, ordinalPosition=int(row['col_sort_order'])))
table_metadata = Table(id=uuid.uuid4(), name=last_row['name'],
description=last_row['description'],
columns=columns)
self.status.scanned(table_metadata.name.__root__)
dm = Database(id=uuid.uuid4(),
name=row['schema'],
description=row['description'] if row['description'] is not None else ' ',
service=EntityReference(id=self.service.id, type=self.SERVICE_TYPE))
table_and_db = OMetaDatabaseAndTable(table=table_metadata, database=dm)
yield table_and_db
def close(self):
self.alchemy_helper.close()
def get_status(self) -> SourceStatus:
return self.status
|
Python
|
CL
|
64bfa35de8cfb09ab25f359eaa11965cd2889c12cf3c92c2a61707e65cf513b2
|
import torch
import torch.nn as nn
from models.deep_sets import DeepSet
from models.layers import PsiSuffix
class SetToGraph(nn.Module):
def __init__(self, in_features, out_features, set_fn_feats, method, hidden_mlp, predict_diagonal, attention, cfg=None):
"""
SetToGraph model.
:param in_features: input set's number of features per data point
:param out_features: number of output features.
:param set_fn_feats: list of number of features for the output of each deepsets layer
:param method: transformer method - quad, lin2 or lin5
:param hidden_mlp: list[int], number of features in hidden layers mlp.
:param predict_diagonal: Bool. True to predict the diagonal (diagonal needs a separate psi function).
:param attention: Bool. Use attention in DeepSets
:param cfg: configurations of using second bias in DeepSetLayer, normalization method and aggregation for lin5.
"""
super(SetToGraph, self).__init__()
assert method in ['lin2', 'lin5']
self.method = method
if cfg is None:
cfg = {}
self.agg = cfg.get('agg', torch.sum)
self.set_model = DeepSet(in_features=in_features, feats=set_fn_feats, attention=attention, cfg=cfg)
        # Suffix - maps the last number of features to out_features per adjacency-matrix entry
d2 = (2 if method == 'lin2' else 5) * set_fn_feats[-1]
hidden_mlp = [d2] + hidden_mlp + [out_features]
self.suffix = PsiSuffix(hidden_mlp, predict_diagonal=predict_diagonal)
def forward(self, x):
x = x.transpose(2, 1) # from BxNxC to BxCxN
u = self.set_model(x) # Bx(out_features)xN
n = u.shape[2]
if self.method == 'lin2':
m1 = u.unsqueeze(2).repeat(1, 1, n, 1) # broadcast to rows
m2 = u.unsqueeze(3).repeat(1, 1, 1, n) # broadcast to cols
block = torch.cat((m1, m2), dim=1)
elif self.method == 'lin5':
m1 = u.unsqueeze(2).repeat(1, 1, n, 1) # broadcast to rows
m2 = u.unsqueeze(3).repeat(1, 1, 1, n) # broadcast to cols
m3 = self.agg(u, dim=2, keepdim=True).unsqueeze(3).repeat(1, 1, n, n) # sum over N, put on all
m4 = u.diag_embed(dim1=2, dim2=3) # assign values to diag only
m5 = self.agg(u, dim=2, keepdim=True).repeat(1, 1, n).diag_embed(dim1=2, dim2=3) # sum over N, put on diag
block = torch.cat((m1, m2, m3, m4, m5), dim=1)
edge_vals = self.suffix(block) # shape (B,out_features,N,N)
return edge_vals
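# Smoke-test sketch (illustrative; all sizes are made-up values):
#   s2g = SetToGraph(in_features=10, out_features=1, set_fn_feats=[64, 64],
#                    method='lin2', hidden_mlp=[128], predict_diagonal=True,
#                    attention=True)
#   edges = s2g(torch.rand(4, 20, 10))   # -> shape (4, 1, 20, 20)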
|
Python
|
CL
|
64f3b3c273f215fae36cbf08bd5adbf39a0d65ca8d4ad97ea2c80f16fdd057ca
|
# extract features from list of text instances based on configuration set of features
import nltk
import numpy
import re
import time
import csv
import math
import pickle
import gensim
import keras
from scipy.spatial.distance import cosine
from keras.layers import Input, Dense, LSTM, RepeatVector, Dropout
from keras.models import Model
from numpy.linalg import svd
from nltk import ngrams
from collections import *
from gensim import corpora
from gensim.models.lsimodel import LsiModel
source_text = []
stemmed_text = []
#model = gensim.models.Word2Vec.load_word2vec_format('GoogleNews-vectors-negative300.bin.gz', binary=True)
def timeit(func):
# Timeit is a function that can be used as a decorator for another function so you can see its run time
def timed(*args, **kwargs):
t1 = time.time()
result = func(*args, **kwargs)
t2 = time.time()
print('Method {} took {} seconds'.format(func.__name__, t2-t1))
return result
return timed
@timeit
def preprocess():
# first stem and lowercase words, then remove rare
# lowercase
global source_text
source_text = [text.lower() for text in source_text]
# tokenize
global tokenized_text
tokenized_text = [nltk.word_tokenize(text) for text in source_text]
pickle.dump(tokenized_text, open('tokenized.pkl', 'wb'), protocol=2)
# POS tag
global tagged_text
tagged_text = [[n[1] for n in nltk.pos_tag(essay)] for essay in tokenized_text]
# Padded text
global padded_sequences
padded_sequences = pad_sentences(tokenized_text)
# stem
porter = nltk.PorterStemmer()
global stemmed_text
stemmed_text = []
for tokens in tokenized_text:
stemmed_line = []
for t in tokens:
try:
stemmed_line.extend([porter.stem(t)])
except IndexError:
stemmed_line.extend('')
stemmed_text.append(stemmed_line)
#stemmed_text = [[porter.stem(t) for t in tokens] for tokens in tokenized_text]
# remove rare
vocab = nltk.FreqDist(w for line in stemmed_text for w in line)
rarewords_list = set(vocab.hapaxes())
stemmed_text = [['<RARE>' if w in rarewords_list else w for w in line] for line in stemmed_text]
# note that source_text will be lowercased, but only stemmed_text will have rare words removed
@timeit
def bag_of_function_words():
bow = []
for sw in nltk.corpus.stopwords.words('english'):
counts = [sum(1 for _ in re.finditer(r'\b%s\b' % sw, line)) for line in source_text]
bow.append(counts)
return bow, nltk.corpus.stopwords.words('english')
# FILL IN OTHER FEATURE EXTRACTORS
# NGRAM FUNCTIONS
@timeit
def compute_ngrams(n, text, pos=False):
    # General function that takes a tokenized corpus as input and outputs a list of lists
    # with each sublist containing the ngrams of its corresponding essay
ngs = []
if pos:
text = tagged_text
if n == 1 and not pos:
# List comprehension for handling unigram stripping of stopwords
text = [[w for w in essay if w not in nltk.corpus.stopwords.words('english')] for essay in text]
for essay in text:
        # Because ngrams returns a generator, list(...) materializes its elements
ngs.append(list(ngrams(essay, n)))
return ngs, text
@timeit
def ngram_counts(n, top_n, text, pos=False):
# Counts instances of ngrams, and returns the top n ngrams (confusing notation)
if pos:
ngs, text = compute_ngrams(n, text, pos=True)
else:
ngs, _ = compute_ngrams(n, text)
counts = defaultdict(int)
for essay in ngs:
for bg in essay:
counts[bg] += 1
sl = sorted(counts.items(), key=lambda x: x[1], reverse=True) # Sorted list in descending order
top_ngrams = [x[0] for x in sl][:top_n]
bow = []
for ng in top_ngrams:
counts = [float(sum(x == ng for x in ngrams(line, n))) / (len(line)) for line in text]
bow.append(counts)
return bow
def make_ngrams(text, nGramCount):
'''
param text: list w/ sublists, sublists are tokens
param nGramCount: integer indicating ngram to generate
return: dictionary of ngrams, sorted by frequency
'''
ngramCounts = nltk.FreqDist(nltk.ngrams([word for doc in text for word in doc], nGramCount))
return ngramCounts
def most_common(text, n, ngramCount):
'''
param text: list w/sublists, sublists are tokens
param n: integer indicating how many of most common ngrams to return
    param ngramCount: integer indicating the ngram order to generate
return: list of n most common ngrams, w/o counts
'''
ngram_freq_dist = make_ngrams(text, ngramCount)
return [x[0] for x in ngram_freq_dist.most_common(n)]
@timeit
def bag_of_character_trigrams():
# Only change is using string instead of tokens
top_500_trigrams = most_common(source_text, 500, 3)
essay_fds = [nltk.FreqDist(ngrams(essay, 3)) for essay in source_text]
trigram_counts = [[essay_fd[tg] for tg in top_500_trigrams] for essay_fd in essay_fds]
return numpy.asarray(trigram_counts).T.tolist()
# COMPLEXITY FUNCTIONS
@timeit
def characters_per_word(text):
feats = []
for essay in text:
counts = [sum([len(w) for w in essay])/len(essay)]
feats.append(counts)
feats = numpy.asarray(feats).T.tolist() # To keep the dimensions consistent with features in extract_features
return feats
@timeit
def words_per_sentence(text):
    feats = []
    essays = [nltk.sent_tokenize(essay) for essay in text]
    for essay in essays:
        counts = 0
        for sent in essay:
            # count words, not characters, in each sentence
            counts += len(nltk.word_tokenize(sent))
        counts = counts/len(essay)
        feats.append([counts])
    feats = numpy.asarray(feats).T.tolist()
    return feats
@timeit
def unique_words_ratio(text):
feats = []
for essay in text:
feats.append([len(set(essay))/len(essay)])
feats = numpy.asarray(feats).T.tolist()
return feats
# TOPIC MODELS
@timeit
def lsi_transform(text, n_topics):
dictionary = corpora.Dictionary(text)
corpus = [dictionary.doc2bow(essay) for essay in text]
lsi = LsiModel(corpus=corpus, num_topics=n_topics)
return lsi, dictionary
@timeit
def topic_models(text, n_topics):
# Stopwords are uninformative for topic models
text = [[w for w in essay if w not in nltk.corpus.stopwords.words('english')] for essay in text]
lsi, dictionary = lsi_transform(text, n_topics)
topics = []
for essay in text:
e2i = dictionary.doc2bow(essay)
tps = list([x[1] for x in lsi[e2i]])
topics.append(tps)
topics = numpy.asarray(topics).T.tolist()
return topics
# WORD VECTORS
def transform_sent(sent):
'''
:param sent: list of tokenized words
:return: matrix of vectors corresponding to words in sentence (ones if <PAD> token)
'''
vector = []
for w in sent:
if w == '<PAD>' or w not in model.vocab:
vector.append(numpy.ones((300,)))
else:
vector.append(model[w])
vector = numpy.array(vector)
return vector
def pad_sentences(text):
'''
:param text: list of lists where sublists contain tokenized words
:return: list of lists where sublists contain fixed length sequences of words and <PAD> tokens
'''
maxlen = max([len(sent) for sent in text])
for sent in text:
while len(sent) < maxlen:
sent.append('<PAD>')
return text
def transform_tfidf(sent, weighted):
'''
:param sent: List of tokenized words
:param weighted: Dictionary of weighted word vectors
:return: the matrix where each row is a tf-idf weighted word vector
'''
vector = []
words = [w for w in sent if w in model.vocab]
for w in words:
vector.append(weighted[w])
vector = numpy.array(vector)
return vector
def average_sent(sent):
'''
:param sent: list of tokenized words
:return: averaged word vectors for every word in sent if word is in model vocab
'''
vecs = transform_sent(sent)
mean = numpy.mean(vecs, axis=0)
return mean
def average_tfidf(sent, weights):
'''
:param sent: List of tokenized words
:param weights: Dictionary containing tf-idf weighted word vectors
:return: Averaged word vectors
'''
vecs = transform_tfidf(sent, weights)
mean = numpy.mean(vecs, axis=0)
return mean
def tf(term, doc):
'''
:param term: string of desired term
:param doc: list of tokenized words
:return: number of instances of term in doc regularized by the length
'''
return doc.count(term) / len(doc)
def idf(term, docs):
'''
:param term: string of desired term
:param docs: list of lists where sublists are lists of tokenized words
:return: inverse doc frequency of the term w/r/t a corpus
'''
n_docs_containing = sum(term in d for d in docs)
return math.log(len(docs) / (1 + n_docs_containing))
def tf_idf(term, doc, docs):
'''
:param term: string of desired term
:param doc: list of tokenized words
:param docs: list of lists where sublists are lists of tokenized words
:return: tf-idf score for a term given a document and a corpus
'''
return tf(term, doc) * idf(term, docs)
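# A tiny worked example of the scoring above: with docs = [["a", "b"], ["a", "c"]],
# tf("a", docs[0]) == 0.5 and idf("a", docs) == math.log(2 / (1 + 2)), roughly -0.41,
# so terms present in every document score negative under this smoothed idf variant,
# while rarer terms score higher.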
def tf_idf_generation(text):
'''
:param text: list of lists where sublists are lists of tokenized words
:return: dictionary of text vocab where values are their respective tf-idf scores
'''
doc = []
for sent in text:
doc.append([w for w in sent if w in model.vocab])
weighted = defaultdict(float)
for sent in doc:
for w in sent:
if w not in weighted:
weighted[w] = tf_idf(w, sent, text) * model[w]
return weighted
@timeit
def average_word_vecs(text, tfidf=False):
'''
:param text: text to return averaged word vectors for
:return: averaged word vectors for document in text
'''
features = []
if tfidf:
weights = tf_idf_generation(text)
for doc in text:
if tfidf:
features.append(average_tfidf(doc, weights))
else:
features.append(average_sent(doc))
features = numpy.asarray(features).T.tolist()
return features
def pointwise_mult(sent):
'''
:param sent: list of tokenized words
:return: pointwise mutiplication of the corresponding word vectors
'''
vectors = transform_sent(sent)
vec = vectors[0]
for i in range(1, len(vectors)):
vec = numpy.multiply(vec, vectors[i])
return vec
@timeit
def pointwise_wrapper(text):
'''
:param text: list of lists where each sublist is of tokenized words
:return: feature format
'''
features = []
for doc in text:
features.append(pointwise_mult(doc))
features = numpy.array(features).T.tolist()
return features
def svd_decomp(sent):
'''
:param sent: a tokenized sentence (or document)
:returns: singular values of the matrix representing the word vectors in that sentence
'''
matrix = transform_sent(sent)
U, s, V = svd(matrix)
return s
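# Shape sketch, assuming the word2vec model is loaded: a two-word sentence maps to
# a (2, 300) matrix, so svd_decomp returns min(2, 300) == 2 singular values; padded
# inputs keep this count constant across essays.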
@timeit
def svd_wrapper(text):
features = []
for i in range(len(text)):
features.append(svd_decomp(text[i]))
features = numpy.array(features).T.tolist()
return features
@timeit
def auto_encoder(text):
'''
:return: encoded sentence vectors based off of the autoencoder in autoencoder.ipynb
'''
sent_len = len(text[0])
X = numpy.array([transform_sent(s).flatten() for s in text])
encoding_dim = 64
input_sequence = Input(shape=(sent_len * 300,))
encoded = Dense(encoding_dim, activation='relu')(input_sequence)
    encoder = Model(inputs=input_sequence, outputs=encoded)  # Keras 2 keyword names (Keras 1 used input=/output=)
encoder.load_weights('encoder_weights')
features = encoder.predict(X)
features = features.T.tolist()
return features
def cos_dist(x, y):
return cosine(x, y)
def log(fvec, hvec):
with open('bgs.csv', 'a') as lfile:
lwriter = csv.writer(lfile)
lwriter.writerow(hvec)
lwriter.writerows(fvec)
def extract_features(text, conf):
    extract_all = len(conf) == 0  # an empty config means "extract everything"; avoid shadowing the built-in all()
    global source_text
    source_text = text # we'll use global variables to pass the data around
    preprocess()
    features = [] # features will be list of lists, each component list will have the same length as the list of input text
    header = []
    # extract requested features
    if 'bag_of_function_words' in conf or extract_all:
        fvec, hvec = bag_of_function_words()
        features.extend(fvec)
        header.extend(hvec)
        log(fvec, hvec)
    if 'encoded' in conf or extract_all:
        features.extend(auto_encoder(padded_sequences))
    if 'svd_word_vectors' in conf or extract_all:
        # Use padded documents to keep the number of singular values consistent
        svds = svd_wrapper(padded_sequences)
        features.extend(svds)
    if 'pointwise_word_vectors' in conf or extract_all:
        features.extend(pointwise_wrapper(tokenized_text))
    if 'average_word_vectors' in conf or extract_all:
        features.extend(average_word_vecs(tokenized_text))
    if 'tfidf_word_vectors' in conf or extract_all:
        features.extend(average_word_vecs(tokenized_text, tfidf=True))
    if 'bag_of_trigrams' in conf or extract_all:
        features.extend(ngram_counts(3, 500, stemmed_text))
    if 'bag_of_bigrams' in conf or extract_all:
        features.extend(ngram_counts(2, 100, stemmed_text))
    if 'bag_of_unigrams' in conf or extract_all:
        features.extend(ngram_counts(1, 100, stemmed_text))
    if 'bag_of_pos_trigrams' in conf or extract_all:
        features.extend(ngram_counts(3, 500, tokenized_text, pos=True)) # Tokenized for higher accuracy
    if 'bag_of_pos_bigrams' in conf or extract_all:
        features.extend(ngram_counts(2, 100, tokenized_text, pos=True))
    if 'bag_of_pos_unigrams' in conf or extract_all:
        features.extend(ngram_counts(1, 100, tokenized_text, pos=True))
    if 'bag_of_char_trigrams' in conf or extract_all:
        features.extend(bag_of_character_trigrams())
    if 'characters_per_word' in conf or extract_all:
        features.extend(characters_per_word(tokenized_text))
    if 'unique_words_ratio' in conf or extract_all:
        features.extend(unique_words_ratio(tokenized_text))
    if 'words_per_sentence' in conf or extract_all:
        features.extend(words_per_sentence(source_text)) # Source text b/c it's necessary to sentence tokenize the essay
    if 'topic_model_scores' in conf or extract_all:
        features.extend(topic_models(stemmed_text, 20))
    features = numpy.asarray(features).T.tolist() # transpose the list of lists so its dimensions are #instances x #features
with open('features.csv', 'w') as ffile:
fwriter = csv.writer(ffile)
fwriter.writerow(header)
fwriter.writerows(features)
return features
|
Python
|
CL
|
af6de0604463a1b706309589ae241099d2bdd984dc84babf51e714c96835efc0
|
import subprocess
import os
import re
import random, string
import email
import imaplib
import traceback
import datetime
import time
import common
import pool
date_regex = '([0-9]{4}-[0-9]{2}-[0-9]{2})(_([0-9]+))?'
def backup_disks(pool_to_backup, disks, scrub, approve_function):
settings = common.get_settings()
error_disks = list()
for disk in disks:
delete_created_snapshot = False
backup_made = False
try:
print("Performing backup from \"" + pool_to_backup + "\" to \"" + disk["zpool"] + "\"")
            # Create a snapshot with a temporary name first. A snapshot shall
            # get its final name only after it has been approved.
print(" Creating temporary snapshot: ", end="", flush=True)
temp_snapshot_name = "TEMP_SNAPSHOT"
created_snapshot = create_snapshot(pool_to_backup, temp_snapshot_name)
print(created_snapshot)
delete_created_snapshot = True
# import the pool
common.open_luks_and_import_pool(disk, 1)
print(" Finding latest backup snapshot on this disk: ", end="", flush=True)
latest_snapshot_this_disk = find_latest_snapshot(disk["zpool"],disk["zpool"])
if latest_snapshot_this_disk != None:
print(latest_snapshot_this_disk)
else:
print("none found")
# export pool again. it may take hours to get approval
common.export_pool_and_close_luks(disk, 1)
print(" Finding latest approved snapshot: ", end="", flush=True)
latest_approved_snapshot = find_latest_snapshot(pool_to_backup,[disk["zpool"] for disk in settings["backup-disks"]])
            if latest_approved_snapshot is not None:
                print(latest_approved_snapshot)
            else:
                raise Exception("No approved snapshot found to diff against")
# create diff between new snapshot and last approved
# request approval if there are differences
# if no differences or approval received, continue
ok_to_continue = check_for_diff_and_get_approval(pool_to_backup, disk, latest_approved_snapshot, created_snapshot, approve_function)
if not ok_to_continue:
print(" Omitting backup")
else:
print(" Continuing")
# find the final name for the snapshot
print(" Finding next snapshot name: ", end="", flush=True)
next_snapshot_name = find_next_snapshot_name(pool_to_backup, disk["zpool"])
print(next_snapshot_name)
# Rename the snapshot. Note that if something fails now we shall not remove this snapshot
# because it is the new baseline for what has been approved.
print(" Renaming snapshot " + created_snapshot + " to: ", end="", flush=True)
created_snapshot = rename_snapshot(pool_to_backup, created_snapshot, next_snapshot_name)
print(created_snapshot)
delete_created_snapshot = False
common.open_luks_and_import_pool(disk, 2)
print(" Checking pool health: ", end="", flush=True)
healthy, msg = pool.pool_is_healthy(disk["zpool"])
print(msg, end="", flush=True) # output already contain newline
            if not healthy:
                raise Exception("Pool " + disk["zpool"] + " is not healthy")
if latest_snapshot_this_disk == None:
print(" Performing first backup: ", end="", flush=True)
backup_made,errormsg = perform_first_backup(pool_to_backup, disk["zpool"], created_snapshot)
else:
print(" Performing incremental backup: ", end="", flush=True)
backup_made,errormsg = perform_incremental_backup(pool_to_backup, disk["zpool"], latest_snapshot_this_disk, created_snapshot)
error = False
if backup_made:
print("success")
print(" Checking pool health: ", end="", flush=True)
healthy, msg = pool.pool_is_healthy(disk["zpool"])
print(msg, end="", flush=True) # output already contain newline
if not healthy:
error = True
else:
print("FAILED: " + errormsg)
error = True
if error:
error_disks.append(disk)
# if we shall not scrub any disks or if the disk had an error, export and close now
if not scrub or error:
common.export_pool_and_close_luks(disk, 1)
except Exception as e:
traceback.print_exc()
print(" Backup aborted for " + disk["zpool"])
error_disks.append(disk)
try:
common.export_pool_and_close_luks(disk, 1)
except Exception as e:
print(" Could not export and close disk")
traceback.print_exc()
# delete old snapshots
if backup_made and latest_snapshot_this_disk != None:
all_snapshots_this_disk = find_all_snapshots(pool_to_backup, disk["zpool"])
for snapshot in all_snapshots_this_disk:
if snapshot != created_snapshot:
print(" Deleting old snapshot: " + snapshot)
delete_snapshot(pool_to_backup, snapshot)
# delete temporary snapshot, only if it has not been approved and renamed
if delete_created_snapshot:
print(" Deleting new snapshot: " + created_snapshot)
delete_snapshot(pool_to_backup, created_snapshot)
return error_disks
def create_snapshot(pool, snapshot_name):
# Create the snapshot
cmd = "zfs snapshot -r " + pool + "@" + snapshot_name
cpinst = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\":" + cpinst.stderr.decode("utf-8"))
return snapshot_name
def find_next_snapshot_name(pool_to_backup, backup_pool):
basename = backup_pool + "_" + datetime.datetime.now().strftime("%Y-%m-%d")
currentdate = datetime.date.today()
    latest_snapshot = find_latest_snapshot(pool_to_backup, backup_pool)
    global date_regex
    match = re.search(date_regex, latest_snapshot) if latest_snapshot is not None else None
    if match is not None and match[1] == currentdate.isoformat():
        latest_snapshot_number = int(match[3]) if match[3] is not None else 0
        snapshot_name = basename + "_" + str(latest_snapshot_number + 1)
    else:
        snapshot_name = basename + "_1"
return snapshot_name
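# Naming sketch (hypothetical pool "backup1"): the first snapshot taken on a day
# is "backup1_2021-03-05_1" and a second one the same day becomes
# "backup1_2021-03-05_2", since date_regex captures the date in group 1 and the
# trailing counter in group 3.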
def rename_snapshot(pool, old_snapshot_name, new_snapshot_name):
# Rename the snapshot
cmd = "zfs rename -r " + pool + "@" + old_snapshot_name + " @" + new_snapshot_name
cpinst = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\":" + cpinst.stderr.decode("utf-8"))
return new_snapshot_name
def delete_snapshot(pool, snapshot):
cmd = "zfs destroy -r " + pool + "@" + snapshot
cpinst = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\":" + cpinst.stderr.decode("utf-8"))
# returns a list sorted by creationtime (latest snapshot last)
def find_all_snapshots(pool, backup_pools):
# if only a single disk dict passed, make a list of it
if type(backup_pools) == str:
new_list = list()
new_list.append(backup_pools)
backup_pools = new_list
# -H: without headers and with single tab between columns
# -r: recursive
# -d 1: depth 1 (only specified dataset)
# -t snapshot: only list snapshots
# -o name: only list name
# -s creation: sort by creation time
cmd = "zfs list -H -r -d 1 -t snapshot -o name -s creation " + pool
cpinst = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\":\n" + cpinst.stderr.decode("utf-8"))
stdout = cpinst.stdout.decode("utf-8")
# group 1 will contain the date and group 3 the number (if any)
global date_regex
snapshots = list()
for line in stdout.splitlines():
words = line.split('@')
for backup_pool in backup_pools:
if re.fullmatch(backup_pool + '_' + date_regex, words[1]) != None:
snapshots.append(words[1])
return snapshots
def find_latest_snapshot(pool, backup_pools):
snapshots = find_all_snapshots(pool, backup_pools) # returns a list already sorted by creationtime
if len(snapshots) > 0:
return snapshots[-1]
else:
return None
def create_added_removed_renamed_datasets_diff(pool, old_datasets):
diff_dict = dict()
diff_dict["added"] = list()
diff_dict["removed"] = list()
diff_dict["renamed"] = list() # list of dicts where dict["old-name"] = old_name, dict["new-name"] = new_name
# TODO: Implement
return diff_dict
# return a dict of dataset,diff-text
def create_diff(pool, prev_snapshot, new_snapshot, old_datasets=None):
datasets_on_pool = get_datasets(pool)
diff_dict = dict()
added_datasets = list() # needs to be visible later during normal diff
# create diffs of added/removed/renamed datasets if we get a list of old ones
if old_datasets != None:
# We define the special dataset name '\DATASETS' for the diff dealing with added/removed/renamed datasets
datasets_diff_name = "\\DATASETS"
datasets_diff_dict = create_added_removed_renamed_datasets_diff(pool, old_datasets)
added_datasets = datasets_diff_dict["added"]
removed_datasets = datasets_diff_dict["removed"]
renamed_datasets = datasets_diff_dict["renamed"]
diff = str()
# handle added_datasets
for dataset in added_datasets:
diff = diff + '+\t' + dataset + '\n'
# handle removed_datasets
for dataset in removed_datasets:
diff = diff + '-\t' + dataset + '\n'
# handle renamed_datasets
for rename_diff in renamed_datasets:
diff = diff + 'R\t' + rename_diff["old-name"] + " -> " + rename_diff["new-name"] + '\n'
diff_dict[datasets_diff_name] = diff
# create diffs for all datasets
for dataset in datasets_on_pool:
# check that dataset existed in last snapshot
cmd = "zfs list -H " + dataset + "@" + prev_snapshot
cpinst = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
diff = str()
if cpinst.returncode == 1 and re.search("does not exist", cpinst.stderr.decode("utf-8")):
# previous snapshot did not exist in this dataset
# if the dataset has not been detected as added, print this info in the diff
if dataset not in added_datasets:
if old_datasets != None: # if we have performed a diff of dataset lists
diff = "Warning: This dataset was not detected as added but did not have the previous snapshot."
else:
diff = "Warning: This dataset did not have the previous snapshot. Is it a new dataset?"
elif cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\":" + cpinst.stderr.decode("utf-8"))
else: # dataset existed in last snapshot, perform diff
cmd = "zfs diff -FHt " + dataset + "@" + prev_snapshot + " " + dataset + "@" + new_snapshot
cpinst = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\":" + cpinst.stderr.decode("utf-8"))
stdout = cpinst.stdout.decode("utf-8")
for line in stdout.splitlines():
columns = line.split('\t')
timestamp_str = columns[0]
timestamp = int(timestamp_str.split('.')[0])
dateandtime = str(datetime.datetime.fromtimestamp(timestamp))
difftype = columns[1]
filetype = columns[2]
filepath = columns[3]
diff = diff + difftype + '\t' + filetype + '\t' + dateandtime + '\t' + filepath + '\n'
if len(diff) > 0:
diff_dict[dataset] = diff
return diff_dict
def create_diff_text(diff_dict):
diff_text = str()
first = True
for key in diff_dict:
if not first: # print an extra newline before each new dataset
diff_text += '\n'
else:
first = False
diff_text += key + '\n'
diff_text += diff_dict[key] + '\n'
return diff_text
def approve_by_console(diff_dict):
# present diff
userinput = input(" Specify a viewer to use or leave empty to print to console: ")
userinput = userinput.strip()
editor = userinput if len(userinput) > 0 else None
diff_text = create_diff_text(diff_dict)
if editor == None:
print(diff_text)
else:
diff_file = "BACKUP_TEMP.diff"
print(" Creating temporary diff file: " + diff_file)
with open(diff_file, 'w') as f:
f.write(diff_text)
cmd = editor + ' ' + diff_file
try:
cpinst = subprocess.run(cmd.split(), stderr=subprocess.PIPE)
finally:
print(" Removing temporary diff file")
os.remove(diff_file)
if cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\": " + cpinst.stderr.decode("utf-8"))
# get approval
inp = input(" Are the changes ok? Type \"YES\": ")
if inp == "YES":
return True
else:
return False
def get_email_text(msg):
if msg.is_multipart():
for part in msg.walk():
# each part is a either non-multipart, or another multipart message
# that contains further parts... Message is organized like a tree
if part.get_content_type() == 'text/plain':
return part.get_payload() # return the raw text
else:
return msg.get_payload()
def approve_by_mail_single(diff_dict):
# present diff
settings = common.get_settings()
approve_settings = settings["approve-method-mail-settings"]
sender = approve_settings["sender-name"]
randomstring = ''.join(random.choices(string.ascii_letters + string.digits, k=16))
subject = "Subject: " + approve_settings["subject"] + ": " + randomstring + '\n'
diffmsg = """Hello,
I am the little backup robot. I have detected changes and am eager to perform a backup right away!
But I must wait until my human approves the changes...
Approve by replying "yes" (without the quotes).
Deny by replying "no" (also without the quotes).
The changes:\n\n"""
diff_text = create_diff_text(diff_dict)
diffmsg = diffmsg + diff_text
cmd = ("sendmail", "-F", sender, approve_settings["recipient"])
cpinst = subprocess.run(cmd, input=subject+diffmsg, encoding="utf-8", stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\":" + cpinst.stderr)
print(" Diff mail(s) sent.")
# get approval
approved = False
print(" Logging in to mail server to wait for replies...", end="", flush=True)
server = imaplib.IMAP4_SSL(approve_settings["imap-server"], port=approve_settings["imap-port"])
rv, data = server.login(approve_settings["imap-account"], approve_settings["imap-password"])
print("done")
rv, data = server.select()
if rv == 'OK':
print(" Waiting for approval(s). Timeout is " + str(approve_settings["timeout"]) + " seconds.")
approval_received = False
start_time = time.time()
while not approval_received and time.time() < start_time + approve_settings["timeout"]:
server.recent()
rv, data = server.search(None, "(SUBJECT " + randomstring + ")")
            if rv != 'OK':
                print("    Could not search mails!")
                server.logout()
                return False  # treat a failed search as "not approved"
for num in data[0].split():
rv, data = server.fetch(num, '(RFC822)')
                if rv != 'OK':
                    print("    ERROR fetching mail", num)
                    server.logout()
                    return False  # treat a failed fetch as "not approved"
msg = email.message_from_bytes(data[0][1])
print(" Reply from " + str(msg['From']) + ": ", end="", flush=True)
the_reply = get_email_text(msg)
for line in the_reply.splitlines():
strippedline = line.strip()
if strippedline != '':
if strippedline.lower() == "yes":
print("Approved!")
approval_received = True
approved = True
elif strippedline.lower() == "no":
print("Declined!")
approval_received = True
approved = False
else:
print("Invalid response:\n")
for line in the_reply.splitlines():
print(" " + line)
print("\n Still waiting.")
# we have found the first line that wasn't whitespace, don't process the rest
break
# delete the mail
server.store(num, '+FLAGS', '\\Deleted')
if not approval_received:
time.sleep(10)
if not approval_received:
print(" Timeout")
server.close()
else:
print(" ERROR: Unable to open mailbox ", rv)
server.logout()
return approved
approve_methods = {"console": approve_by_console, "mail": approve_by_mail_single}
def check_for_diff_and_get_approval(pool_to_backup, backup_disk, prev_snapshot, new_snapshot, approve_function):
print(" Checking for diff from the last approved snapshot")
diff_dict = create_diff(pool_to_backup, prev_snapshot, new_snapshot)
# check if we have any differences
ok_to_cont = False
if len(diff_dict) > 0:
print(" Diff found. Continuing to get approval")
ok_to_cont = approve_function(diff_dict)
else:
print(" No diff")
ok_to_cont = True
return ok_to_cont
def get_datasets(pool):
cmd = "zfs list -rH -o name " + pool
cpinst = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\":" + cpinst.stderr.decode("utf-8"))
lines = cpinst.stdout.decode("utf-8").splitlines()
datasets = list()
for line in lines:
if line != pool:
datasets.append(line)
return datasets
def check_snapshot_on_pool(pool, snapshot):
# Verify that all snapshots have been created on the backup pool
datasets = get_datasets(pool)
datasets_not_on_pool = list()
for dataset in datasets:
cmd = "zfs list -H " + dataset + "@" + snapshot
cpinst = subprocess.run(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if cpinst.returncode == 1 and re.search("does not exist", cpinst.stderr.decode("utf-8")):
datasets_not_on_pool.append(dataset)
elif cpinst.returncode != 0:
raise Exception("Error in \"" + cmd + "\":" + cpinst.stderr.decode("utf-8"))
return datasets_not_on_pool
def perform_first_backup(pool_to_backup, backup_pool, snapshot):
backup_made = False
error = False
cmd_send = "zfs send -R " + pool_to_backup + "@" + snapshot
cmd_recv = "zfs recv -Fdu " + backup_pool
psend = subprocess.Popen(cmd_send.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
precv = subprocess.Popen(cmd_recv.split(), stdin=psend.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#psend.stdout.close() # Allow send to receive a SIGPIPE if recv exits. (from subprocess docs example)
# The above can't be used because we can't call communicate to read
# stderr from send if we've closed the handle.
recv_stderr = precv.communicate()[1]
if precv.returncode != 0:
raise Exception("Error in \"" + cmd_recv + "\":" + recv_stderr.decode("utf-8"))
send_stderr = psend.communicate()[1]
if psend.returncode != 0:
raise Exception("Error in \"" + cmd_send + "\":" + send_stderr.decode("utf-8"))
datasets_not_backed_up = check_snapshot_on_pool(backup_pool, snapshot)
if len(datasets_not_backed_up) > 0:
error = "Error! Snapshot missing in backup on the following datasets: "
for dataset in datasets_not_backed_up:
error = error + dataset + ' '
return (False,error)
else:
return (True,error)
def perform_incremental_backup(pool_to_backup, backup_pool, prev_snapshot, new_snapshot):
backup_made = False
error = False
cmd_send = "zfs send -R -I " + pool_to_backup + "@" + prev_snapshot + " " + pool_to_backup + "@" + new_snapshot
cmd_recv = "zfs recv -Fdu " + backup_pool
psend = subprocess.Popen(cmd_send.split(), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
precv = subprocess.Popen(cmd_recv.split(), stdin=psend.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#psend.stdout.close() # Allow send to receive a SIGPIPE if recv exits. (from subprocess docs example)
# The above can't be used because we can't call communicate to read
# stderr from send if we've closed the handle.
recv_stderr = precv.communicate()[1]
send_stderr = psend.communicate()[1]
if precv.returncode != 0:
raise Exception("Error in \"" + cmd_recv + "\":" + recv_stderr.decode("utf-8"))
if psend.returncode != 0:
raise Exception("Error in \"" + cmd_send + "\":" + send_stderr.decode("utf-8"))
datasets_not_backed_up = check_snapshot_on_pool(backup_pool, new_snapshot)
if len(datasets_not_backed_up) > 0:
error = "Error! Snapshot missing in backup on the following datasets: "
for dataset in datasets_not_backed_up:
error = error + dataset + ' '
return (False,error)
else:
return (True,error)
|
Python
|
CL
|
31c23fee5da5a32480cb38c234cc311a23b8d25897f31eaf5c97d39001e5b43f
|
#!/usr/bin/env python
# coding: utf-8
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import cv2
import utils
import checkers
import checkers_swig
from tqdm import trange
from IPython.display import clear_output
from utils import play_and_log_episode, img_by_obs
from tensorboardX import SummaryWriter
from time import gmtime, strftime
class PreprocessedCheckers(checkers.CheckersEnvironment):
def __init__(self, american):
"""A gym wrapper that crops, scales image into the desired shapes and grayscales it."""
checkers.CheckersEnvironment.__init__(self, american)
def _observation(self, img):
"""what happens to each observation"""
img = checkers.CheckersEnvironment._observation(self, img)
        normalized = img.copy()  # work on a copy so all masks below are computed on the unmodified source values
normalized[img == 2] = -1
normalized[img == 4] = -2
normalized[img == 3] = 2
return normalized
def make_opts():
return dict(american=True, black_strategy=checkers_swig.MakeMCSTStrategy(checkers_swig.Team_Black, 100))
def make_env():
opts = make_opts()
env = PreprocessedCheckers(opts['american'])
env.reset(**opts)
return env, opts
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def conv2d_size_out(size, kernel_size, stride):
"""
common use case:
cur_layer_img_w = conv2d_size_out(cur_layer_img_w, kernel_size, stride)
cur_layer_img_h = conv2d_size_out(cur_layer_img_h, kernel_size, stride)
to understand the shape for dense layer's input
"""
return (size - (kernel_size - 1) - 1) // stride + 1
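# e.g. an 8x8 board through a 3x3 kernel with stride 2:
# conv2d_size_out(8, 3, 2) == 3, and stacking the same conv again,
# conv2d_size_out(3, 3, 2) == 1 -- which is how out_conv_shape in DQNAgent is derived below.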
class Flatten(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x.view(x.size(0), -1)
class DQNAgent(nn.Module):
def __init__(self, action_state_shape, epsilon=0):
super().__init__()
self.epsilon = epsilon
self.action_state_shape = action_state_shape
self.conv1 = nn.Conv2d(self.action_state_shape[0], 16, 3, 2)
self.conv2 = nn.Conv2d(16, 32, 3, 2)
        out_conv_shape = (conv2d_size_out(conv2d_size_out(self.action_state_shape[1], 3, 2), 3, 2),
                          conv2d_size_out(conv2d_size_out(self.action_state_shape[2], 3, 2), 3, 2))
self.flatten = Flatten()
self.linear1 = nn.Linear(32 * out_conv_shape[0] * out_conv_shape[1], 256)
self.linear2 = nn.Linear(256, 1)
def forward(self, state_t):
"""
:param state_t: a batch of 2-frame buffers, shape = [batch_size, 2, h, w]
"""
x = F.relu(self.conv1(state_t))
x = F.relu(self.conv2(x))
x = self.flatten(x)
x = F.relu(self.linear1(x))
qvalues = self.linear2(x)
assert qvalues.requires_grad, "qvalues must be a torch tensor with grad"
assert len(
qvalues.shape) == 2 and qvalues.shape[0] == state_t.shape[0] and qvalues.shape[1] == 1
return qvalues
def get_qvalues(self, states, actions_list):
model_device = next(self.parameters()).device
states_list = []
        variants_cumulative = [0]
        for state, actions in zip(states, actions_list):
            states_list += [[state, action] for action in actions]
            variants_cumulative.append(variants_cumulative[-1] + len(actions))
        states = np.array(states_list)
        states = torch.tensor(states, device=model_device, dtype=torch.float)
        qvalues = self.forward(states)
        qvalues = qvalues.data.cpu().numpy()
        q_list = []
        for i in range(len(variants_cumulative) - 1):
            q_list.append(qvalues[variants_cumulative[i]:variants_cumulative[i + 1]])
return q_list
def sample_actions(self, qvalues):
epsilon = self.epsilon
batch_size = len(qvalues)
random_actions = [np.random.choice(len(actions)) for actions in qvalues]
best_actions = [actions.argmax(axis=0) for actions in qvalues]
should_explore = np.random.choice(
[0, 1], batch_size, p=[1-epsilon, epsilon])
return np.where(should_explore, random_actions, best_actions)
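# Epsilon-greedy illustration (hypothetical q-values): with epsilon == 0 each
# state picks its argmax variant, e.g. qvalues = [np.array([[0.1], [0.9]])]
# selects index 1; with epsilon == 1 a uniformly random variant index is
# sampled instead.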
def evaluate(env, agent, n_games=1, greedy=False, t_max=10000):
rewards = []
for _ in range(n_games):
env.reset(**make_opts())
s = env.observation()
reward = 0
for _ in range(t_max):
actions_values = env.current_possible_actions_values()
actions = env.possible_actions(env.env.CurrentState())
qvalues = agent.get_qvalues([s], [actions_values])
action = qvalues[0].argmax(axis=0)[0] if greedy else agent.sample_actions(qvalues)[0][0]
s, r, done, _ = env.step(action)
reward += r[0]
if done:
break
rewards.append(reward)
return np.mean(rewards)
class ReplayBuffer(object):
def __init__(self, size):
self._storage = []
self._maxsize = size
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, act_tp1, done):
data = (obs_t, action, reward, obs_tp1, act_tp1, done)
if len(self) == self._maxsize:
self._storage = self._storage[1:]
self._storage.append(data)
def sample(self, batch_size):
idxes = np.random.choice(np.arange(len(self)), batch_size)
states, actions, rewards, next_states, next_actions, dones = [], [], [], [], [], []
for idx in idxes:
state, action, reward, next_state, next_action, done = self._storage[idx]
states.append(state)
actions.append(action)
rewards.append(reward)
next_states.append(next_state)
next_actions.append(next_action)
dones.append(done)
return np.array(states), np.array(actions), np.array(rewards), np.array(next_states), np.array(next_actions), np.array(dones)
def play_and_record(initial_state, agent, env, exp_replay, n_steps=1):
s = initial_state
sum_rewards = 0
for n in range(n_steps):
actions_values = env.current_possible_actions_values()
actions = env.possible_actions(env.env.CurrentState())
q_list = agent.get_qvalues([s], [actions_values])
a = agent.sample_actions(q_list)[0][0]
act = actions_values[a]
next_s, r, done, _ = env.step(a)
exp_replay.add(s, act, r, next_s, env.current_possible_actions_values(), done)
s = next_s
sum_rewards += r
if done:
env.reset(**make_opts())
s = env.observation()
return sum_rewards, s
def add_actions_to_states(states, actions):
batch = []
indices = []
for i, l in enumerate(actions):
if len(l) == 0:
l = [states[i]]
indices.append((0 if i == 0 else indices[-1]) + len(l))
for action in l:
batch.append([states[i], action])
return np.array(batch), indices
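# Shape sketch for the helper above: with states = [s] and actions = [[a1, a2]],
# the returned batch stacks [s, a1] and [s, a2] and indices == [2], so rows 0:2
# of the target network's output all belong to that single state.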
def compute_td_loss(states, actions, rewards, next_states, next_actions, is_done,
agent, target_network,
gamma=0.99,
check_shapes=False,
device=device):
batch_size = states.shape[0]
states = np.array([states, actions]).transpose((1, 0, 2, 3))
states = torch.tensor(states, device=device, dtype=torch.float) # shape: [batch_size, *state_shape]
actions = torch.tensor(actions, device=device, dtype=torch.long) # shape: [batch_size]
rewards = torch.tensor(rewards, device=device, dtype=torch.float) # shape: [batch_size]
# shape: [batch_size, *state_shape]
next_states, next_state_idxs = add_actions_to_states(next_states, next_actions)
next_states = torch.tensor(next_states, device=device, dtype=torch.float)
is_done = torch.tensor(
is_done.astype('float32'),
device=device,
dtype=torch.float
) # shape: [batch_size]
is_not_done = 1 - is_done
predicted_qvalues = agent(states)
predicted_next_qvalues = target_network(next_states)
predicted_qvalues_for_actions = predicted_qvalues[range(
len(actions)), 0]
n_values = []
for start, end in zip([0] + next_state_idxs[:-1], next_state_idxs):
n_values.append(torch.max(predicted_next_qvalues[start:end], dim=0)[0])
next_state_values = torch.cat(n_values).reshape(batch_size)
    target_qvalues_for_actions = gamma * next_state_values * is_not_done + rewards[:, 0]
loss = torch.mean((predicted_qvalues_for_actions -
target_qvalues_for_actions.detach()) ** 2)
return loss
seed = 0xbadf00d
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
env, opts = make_env()
state_shape = (2, 8, 8)
state = env.observation()
agent = DQNAgent(state_shape, epsilon=1).to(device)
target_network = DQNAgent(state_shape).to(device)
target_network.load_state_dict(agent.state_dict())
buf_size = 10**4
print('Heating up replay buffer of size {}'.format(buf_size))
exp_replay = ReplayBuffer(buf_size)
for i in range(100):
if not utils.is_enough_ram(min_available_gb=0.1):
print("""
            Less than 100 MB of RAM available.
            Make sure the buffer size is not too large.
            Also check whether other processes are consuming RAM heavily.
"""
)
break
play_and_record(state, agent, env, exp_replay, n_steps=10**2)
if len(exp_replay) == buf_size:
break
print('Finished: {} plays'.format(len(exp_replay)))
timesteps_per_epoch = 1
batch_size = 16
total_steps = 3 * 10**6
decay_steps = 10**6
opt = torch.optim.Adam(agent.parameters(), lr=1e-4)
init_epsilon = 0.4645
final_epsilon = 0.1
loss_freq = 50
refresh_target_network_freq = 5000
eval_freq = 5000
max_grad_norm = 50
n_lives = 5
mean_rw_history = []
td_loss_history = []
grad_norm_history = []
initial_state_v_history = []
writer = SummaryWriter('runs/qLearning' + strftime('%a%d%b%Y%H%M%S', gmtime()))
env.reset(**make_opts())
state = env.observation()
for step in trange(total_steps + 1):
agent.epsilon = utils.linear_decay(init_epsilon, final_epsilon, step, decay_steps)
# play
_, state = play_and_record(state, agent, env, exp_replay, timesteps_per_epoch)
# train
batch_s, batch_a, batch_r, batch_ns, batch_na, batch_done = exp_replay.sample(batch_size)
loss = compute_td_loss(batch_s, batch_a, batch_r, batch_ns, batch_na, batch_done, agent, target_network)
loss.backward()
grad_norm = nn.utils.clip_grad_norm_(agent.parameters(), max_grad_norm)
opt.step()
opt.zero_grad()
if step % loss_freq == 0:
td_loss_history.append(loss.data.cpu().item())
grad_norm_history.append(grad_norm)
if step % refresh_target_network_freq == 0:
target_network.load_state_dict(agent.state_dict())
if step % eval_freq == 0:
torch.save(agent.state_dict(), 'qnn_checkers' + str(step) + '.pt')
mean_rw_history.append(evaluate(
make_env()[0], agent, n_games=3 * n_lives, greedy=True)
)
e = make_env()[0]
initial_state_q_values = agent.get_qvalues(
[e.observation()], [e.current_possible_actions_values()]
)
initial_state_v_history.append(np.max(initial_state_q_values))
print("buffer size = %i, epsilon = %.5f" %
(len(exp_replay), agent.epsilon))
writer.add_scalar('mean_per_life', torch.tensor(mean_rw_history[-1]), step)
assert not np.isnan(td_loss_history[-1])
writer.add_scalar('td_loss_history', torch.tensor(utils.smoothen(td_loss_history)[-1]), step)
writer.add_scalar('initial_state_v_history', torch.tensor(initial_state_v_history[-1]), step)
writer.add_scalar('grad_norm_history', torch.tensor(utils.smoothen(grad_norm_history)[-1]), step)
writer.file_writer.flush()
final_score = evaluate(
    make_env()[0],  # make_env() takes no arguments and returns (env, opts)
    agent, n_games=30, greedy=True, t_max=10 * 1000
) * n_lives
|
Python
|
CL
|
f94778f631925607039e7a2590803d3e3c1fc7d6c82deb7daa047ef6ff09c376
|
from __future__ import absolute_import
from django.db import transaction, IntegrityError, DatabaseError
from django.test import TestCase
from .models import Counter, WithCustomPK
class ForceTests(TestCase):
def test_force_update(self):
c = Counter.objects.create(name="one", value=1)
# The normal case
c.value = 2
c.save()
# Same thing, via an update
c.value = 3
c.save(force_update=True)
# Won't work because force_update and force_insert are mutually
# exclusive
c.value = 4
self.assertRaises(ValueError, c.save, force_insert=True, force_update=True)
# Try to update something that doesn't have a primary key in the first
# place.
c1 = Counter(name="two", value=2)
self.assertRaises(ValueError, c1.save, force_update=True)
c1.save(force_insert=True)
# Won't work because we can't insert a pk of the same value.
sid = transaction.savepoint()
c.value = 5
self.assertRaises(IntegrityError, c.save, force_insert=True)
transaction.savepoint_rollback(sid)
# Trying to update should still fail, even with manual primary keys, if
# the data isn't in the database already.
obj = WithCustomPK(name=1, value=1)
self.assertRaises(DatabaseError, obj.save, force_update=True)
|
Python
|
CL
|
3ec179c5315f59b88e98e87db0123f6caa2a28589cc8c4057d11df8f2716b8b3
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
# CreateDate:
# Author:
import utils
import time
import datetime
import numpy as np
import pandas as pd
import redis
IS_INIT = True
def get_realdata():
"""
仿真产生真实数据
:return:
"""
realdata = list(dict())
for i in range(40):
t = time.time()
rd = np.random.rand(4).tolist()
d = {'datetime':int(t),
'microsecond': (t - int(t)) * 100,
'channel_names': ['300MT', '301MT', '302MT', '303MT'],
'realdata': rd
}
realdata.append(d)
time.sleep(0.1)
return realdata
def average_list(data):
"""
返回列表的平均值
:param data: 列表
:return: 列表的平均值
"""
dsum = 0
for i in range(len(data)):
dsum += data[i]
return dsum / len(data)
def data_process(q1, q2, q3):
"""
对数据进行处理,从q1中读取数据,处理后,秒级数据放入q2,;原始数据(高频数据)放入redis
:param q1: 队列;存储从tcpserver读取的dict类型原始数据
:param q2: 队列;存储秒级dict类型数据
:param q3:队列;存储高频dict类型数据
:return:
"""
# 温度的高存数据保存在db=1中,模拟量保存在db=2中,开关量保存在db=3中
r = redis.Redis(host='localhost', port=6379, db=1)
current_second = None
df = pd.DataFrame()
"""
20180927
增加current_temperature_data的格式
current_temperature_data = {"DateTime":1, "Datas":{"300MT":1, "301MT":1}}
"""
current_temperature_data = dict() # 存储放入kafka的数据
"""
20180927
增加high_data的格式
high_data = {"300MT": {"DateTime": 1, "Frequency": 1, "Datas": [1, 2, 3, 4]},
"301MT": {"DateTime": 1, "Frequency": 1, "Datas": [1, 2, 3, 4]}}
"""
high_data = dict() #存储放入MongoDB的高频数据
index = 0
while True:
        try:
            real_datas = q1.get(block=False)
            q1.task_done()
            print("time", real_datas['datetime'])
        except Exception:
            continue
        # initialise current_second and high_data on the first sample
        if not current_second:
            for key in real_datas['channel_names']:
                high_data[key] = {}
                high_data[key]["Datas"] = []
            current_second = real_datas['datetime']
            # df = pd.DataFrame(columns=real_datas['channel_names'])
        # if the sample belongs to the current second, buffer it in high_data
if current_second == real_datas['datetime']:
i = 0
for key in real_datas['channel_names']:
high_data[key]["Datas"].append(real_datas['realdata'][i])
high_data[key]["DateTime"] = current_second * 1000
high_data[key]["Frequency"] = real_datas['frequency']
i += 1
# df.loc[index] = real_datas['realdata']
index += 1
        else:
            # a new second has started: preprocess the buffered data, then send it to kafka and redis
            current_temperature_data['DateTime'] = current_second * 1000  # millisecond precision
            dd = {}
            # dd = df.mean().to_dict()  # average the per-second data and convert to a dict, e.g. {'4LHP300MT':300, '4LHP301MT':301}
            for key in high_data:
                dd[key] = high_data[key]["Datas"]
            current_temperature_data['Datas'] = dd
            # put the data on the queue that feeds kafka
            try:
                q2.put(current_temperature_data, block=True)
                # no task_done() here: that call belongs on the consumer side, after a get()
                # print(current_temperature_data)
            except Exception:
                print("q2 error")
                continue
            print("queue size:", q2.qsize())
            # push the one-second data to redis
            try:
                # note: redis stores strings/bytes, so this dict should ideally be serialised (e.g. json.dumps) first
                r.lpush("current_temperature_data", current_temperature_data)
            except Exception:
                continue
            # push the high-frequency data to the redis cache
            try:
                name = str(current_second)
                life_time = 50  # time-to-live in seconds
                # save to redis with a 50-second TTL (auto-deleted on expiry);
                # the (name, value, time) argument order matches the legacy redis-py 2.x Redis class
                r.setex(name, high_data, life_time)
            except Exception:
                continue
            # reset per-second state
            # df.drop(df.index, inplace=True)  # clear df
            current_temperature_data = dict()  # rebind instead of clear(): clear() would mutate the dict already placed on the queue
            # re-initialise with the first sample of the new second
            current_second = real_datas['datetime']
            i = 0
            for key in real_datas['channel_names']:
                high_data[key]["Datas"] = []
                high_data[key]["Datas"].append(real_datas['realdata'][i])
                high_data[key]["DateTime"] = current_second * 1000
                high_data[key]["Frequency"] = real_datas['frequency']
                i += 1
            index = 1  # reset the index
            # current_second = real_datas['datetime']  # assign the next second to current_second
            # df.loc[index] = real_datas['realdata']  #
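# A minimal sketch of how the payloads above could be serialised before being
# written to redis (redis stores strings/bytes, not python dicts). The helper
# name and the choice of json are illustrative assumptions, not part of the
# original pipeline:
#
#   import json
#   def push_serialised(r, key, payload, ttl=None):
#       data = json.dumps(payload)    # dict -> JSON string
#       if ttl is None:
#           r.lpush(key, data)        # append to a redis list
#       else:
#           r.setex(key, ttl, data)   # redis-py 3.x order: (name, time, value)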
if __name__ == '__main__':
# real_datas = get_realdata()
# run(real_datas)
pass
|
Python
|
CL
|
eacab8abc2ac78eab92efd9fcfc8b7efc32f2c026eb703a761fac367914c6499
|
# -*- coding: utf-8 -*-
"""Auto File Organiser made in Python
FileOrganiser
=============
Provides
1. Rearranging files in folders based on their types
2. A script that runs indefinitely so that new files automatically get organised
Use
> Give the path of the folder as an argument while executing the script, otherwise
the Current Working Directory will be used
Example
python automate.pyw <path_to_dir>
"""
import os
import shutil
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import PatternMatchingEventHandler
DIR_TYPE={
'Documents':['pdf','docx','doc','csv','txt','xls','xlsx','log'],
'Archives':['zip','tar'],
'Pictures':['jpg','jpeg','png','gif'],
'Code':['py','css','js','html'],
'Audio':['mp3'],
"Video":['mp4','srt','mkv','3gp'],
"Package":['exe','ini'],
"Torrent":['torrent'],
"SQL":['sql'],
"SKIP":['crdownload', 'fdmdownload'],
    # All uncategorised file extensions will go in the 'Other' folder
}
PATH = sys.argv[1] if len(sys.argv) > 1 else '.'
def movefile(file_name,dir_type):
"""Function that actually move files
Arguments:
file_name {string} -- name of file to move
dir_type {string} -- type of directory to move that file into
"""
try:
dir_location=os.path.join(PATH,dir_type)
if not os.path.exists(dir_location):
os.mkdir(dir_location)
original_filename = file_name
file_name = os.path.basename(file_name)
if os.path.exists(os.path.join(dir_location, file_name)):
i = 1
splited_text=os.path.basename(file_name).split('.')
while os.path.exists(os.path.join(dir_location, splited_text[0]+str(i)+"."+splited_text[1])):
i += 1
file_name = splited_text[0] + str(i) + "." + splited_text[1]
shutil.move(original_filename,os.path.join(dir_location, file_name))
    except Exception:
        # the file may still be in use (e.g. mid-download); wait and rescan
        time.sleep(5)
        organize()
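# Collision handling illustration (hypothetical names): if Documents/report.pdf
# already exists, the incoming file becomes report1.pdf, then report2.pdf, etc.
# Note that split('.') assumes a single extension, so "archive.tar.gz" would be
# renamed as "archive1.tar", dropping the ".gz" part.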
def organize(event = None):
"""this function find all files inside dir and move it to specific folder
Keyword Arguments:
event {event} -- No used currently, but required as event_handler
send an event while calling this function
(default: {None})
"""
file_types= { file_type:dir_name
for dir_name,file_types in DIR_TYPE.items() for file_type in file_types
}
onlyfiles = [f for f in os.listdir(PATH)
if os.path.isfile(os.path.join(PATH, f))]
    for file_name in onlyfiles:
        splited_text=file_name.split('.')
        if len(splited_text)>1:
            ext=splited_text[-1].lower()
            # check membership first to avoid a KeyError on unknown extensions
            if ext in file_types:
                if file_types[ext] == "SKIP":
                    continue
                movefile(os.path.join(PATH,file_name),file_types[ext])
            else:
                movefile(os.path.join(PATH,file_name), "Other")
if __name__ == "__main__":
organize()
patterns = "*"
ignore_patterns = ""
ignore_directories = True
case_sensitive = True
my_event_handler = PatternMatchingEventHandler(patterns, ignore_patterns,
ignore_directories, case_sensitive)
my_event_handler.on_created = organize
my_event_handler.on_modified = organize
go_recursively = False
my_observer = Observer()
my_observer.schedule(my_event_handler, PATH, recursive=go_recursively)
my_observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
my_observer.stop()
my_observer.join()
|
Python
|
CL
|
d27ae319f5938fb0be1825023893b81177efdd20d9bb25c344cbd51f2e36a8e7
|
from json.decoder import JSONDecodeError
import os
import sys
import cgi
import json
from http.server import BaseHTTPRequestHandler
# solve path problem
abs_path = os.path.abspath(__file__)
father_path = os.path.abspath(os.path.dirname(abs_path) + os.path.sep + ".")
project_path = os.path.abspath(os.path.dirname(father_path) + os.path.sep + ".")
sys.path.append(project_path)
from TestModel.archLmirBm25Model import archLmirBm25Model
class PostHandler(BaseHTTPRequestHandler):
"""
Http服务的handler类, 实现了post请求的处理.
要求post请求的'Content-type'为'application/json'而且具有我
们下面定义的输入内容, 否则返回400.
可识别的输入内容示例:
{
"query": ["图书馆", "苏州"],
"weight": [0.1, 0.9]
}
返回的json文件格式如下:
{
"result":{
"imageId":[图片id列表],
"imageSim": [图片的相似度, 归一化到0-1区间内]
"imagePath":[图片path列表],
"imageTitle":[图片title列表],
"imageAnno": [要显示的图片标注列表],
}
"status":{
"statusCode": 0 or 1,
"statusMsg": "String"
}
}
TODO:
1. 添加注释内容. [Done]
2. 增加有weight和无weight的不同行为, 目前默认使用weight. [Done]
3. 增加服务器启动的参数, 包括ip地址和port. [Done]
4. 测试可用性. [Done]
5. 增加状态码. [Done]
"""
# 在实例化之前加载的全局变量(查询类)
archPath = "./Dataset/Arch/DemoData_20201228.json"
model = archLmirBm25Model(archPath=archPath)
def _information_retrieval(self, query, weight, limit=0):
"""
使用weight(或者不用)和query进行信息检索的方法.
Input:
query: 处理好的字符串列表.
weight: 处理好的权重列表, 可以留空.
Return:
返回格式如下的python字典:
{
"result":{
"imageId":[图片id列表],
"imageSim": [图片的相似度, 归一化到0-1区间内]
"imagePath":[图片path列表],
"imageTitle":[图片title列表],
"imageAnno": [要显示的图片标注列表],
}
"status":{
"statusCode": 0 or 1,
"statusMsg": "String"
}
}
若结果出了问题则返回空字典
"""
# check weight is same length as query
useWeight = weight and len(weight) == len(query)
# init result
result = {}
result["result"] = {}
result["status"] = {}
imagePaths = []
imageTitles = []
imageAnno = []
imageDesc = []
projectId = []
# search a list of word/sentence
try:
if useWeight:
sortedResult, index, copora, annoIds, imageIds = self.model.searchWords(listWords=query, weights=weight)
else:
sortedResult, index, copora, annoIds, imageIds = self.model.searchSentence(listWords=query)
except Exception as e:
result["status"]["statusCode"] = 1
result["status"]["statusMsg"] = "Fail, catch exception: {} when retrieving".format(e)
return result
# limit number of the result entry's
if limit != 0 and limit < len(sortedResult):
sortedResult = sortedResult[0:limit]
index = index[0:limit]
copora = copora[0:limit]
annoIds = annoIds[0:limit]
imageIds = imageIds[0:limit]
# get all result
for (annoId, imageId) in zip(annoIds, imageIds):
# get anno dict
annoDict = self.model.archDataset.anns[annoId]
imagePaths.append(self.model.archDataset.imgs[imageId]["targetUrl"])
# get anno, return None if dont have one
imageTitles.append(annoDict.get("title", None))
imageDesc.append(annoDict.get("description", None))
projectId.append(annoDict.get("projectId", None))
# init string to show
annoString = ""
for key, value in annoDict.items():
if key == "concateText" or key == "cutConcateText" or key == "labels":
continue
else:
annoString += "{} : {}\n".format(key, value)
imageAnno.append(annoString)
else:
# result
result["result"]["imageId"] = imageIds
result["result"]["imagePath"] = imagePaths
result["result"]["imageTitle"] = imageTitles
result["result"]["imageAnno"] = imageAnno
result["result"]["imageDesc"] = imageDesc
result["result"]["projectId"] = projectId
result["result"]["imageSim"] = sortedResult.tolist()
# statue
result["status"]["statusCode"] = 0
result["status"]["statusMsg"] = "Success"
return result
def _set_headers(self):
"""设定返回的头部"""
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def do_HEAD(self):
"""返回头部"""
self._set_headers()
# POST echoes the message adding a JSON field
def do_POST(self):
"""
处理post请求, 保证输入为json格式并具有目标格式的内容, 并返回需要的结果(json格式)
"""
# ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
# removed
# refuse to receive non-json content
# if ctype != 'application/json':
# self.send_response(400)
# self.end_headers()
# return
# read the message and convert it into a python dictionary
try:
length = int(self.headers.get('content-length'))
message = json.loads(self.rfile.read(length))
        except JSONDecodeError as e:
            self.send_response(400)
            print("content type is not json (cannot decode)\n")
            self.end_headers()
            return  # without this return, `message` would be undefined below
# get input query and weight, if not exist, refuse.
try:
query = message['query']
weight = message['weight']
except KeyError as e:
self.send_response(400)
print("Didnt find 'query' and 'weight' key.")
self.end_headers()
return
# get result entry limit
# default limit to 5000 result entry
try:
limit = message['limit']
limit = int(limit)
except Exception as e:
limit = 0
# generate result
result = self._information_retrieval(query, weight, limit=limit)
# send the message back
self._set_headers()
self.wfile.write(json.dumps(result).encode("utf-8"))
return
if __name__ == '__main__':
"""启动server代码"""
import argparse
from http.server import HTTPServer
# add parser, now we can set ip and port.
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--ip", default="0.0.0.0", help="ip address of server", required=False)
parser.add_argument("-p", "--port", type=int, default=35008, help="port of server", required=False)
args = parser.parse_args()
# run server
server = HTTPServer((args.ip, args.port), PostHandler)
print('Starting server, use <Ctrl-C> to stop')
server.serve_forever()
# test
# curl -H "Accept: application/json" -H "Content-type: application/json" -X POST -d '{"query":["a", "b"], "weight":[0.1, 0.9]}' -o testresult.json http://127.0.0.1:35008
|
Python
|
CL
|
5e075661937b43324bac2067b69051698e025e396e13b2ffc58f0da4c898d865
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
"""
Escriba una función qué reciba cómo parámetros: una cadena con el código alfanumérico del estudiante y cinco números
enteros (nota1, nota2, nota3, nota4, nota5) que representan las notas de los quices del semestre y retorne una cadena
de caracteres que le proporciona al profesor la información que desea obtener. La cadena debe tener la siguiente
estructura: "El promedio ajustado del estudiante {código} es: {promedio}" dónde, el promedio reportado debe cumplir
con las especificaciones mencionadas anteriormente (redondeado a dos decimales, en escala de 0 a 5 y calculado
eliminando la peor de las cinco notas del estudiante).
"""
def nota_quices(codigo: str, nota1: int, nota2: int, nota3: int, nota4: int, nota5: int) -> str:
''' Nota quices
:Parámetros:
codigo (str): codigo único alfanumérico del estudiante
nota1 (int): Nota del primer quiz reto semestre (0 - 100)
nota2 (int): Nota del segundo quiz del semestre (0 - 100)
nota3 (int): Nota del tercer quiz del semestre (0 - 100)
nota4 (int): Nota del cuarto quiz del semestre (0 - 100)
nota5 (int): Nota del quinto quiz del semestre (0 - 100)
Retorno:
String: de la forma "El promedio ajustado del estudiante {codigo} es: {promedio}" dónde, el promedio se
calcula eliminando la peor nota y se reporta con dos decimales utilizando la escala numérica de 0 a 5
'''
# Paramétros de Función
# codigo = 'AA0010278'
# nota1 = 98
# nota2 = 69
# nota3 = 87
# nota4 = 47
# nota5 = 50
# Encontrar minima nota a ser eliminada
minimo = min(nota1, nota2, nota3, nota4, nota5)
# Calcular promedio de notas (se usa otra variable distinta a promedio)
prom = (nota1 + nota2 + nota3 + nota4 + nota5 - minimo) / 4
# Promedio con nota decimal
promedio = (prom * 5) / 100
# Redondear promedio nota decimal
promedio = round(promedio, 2)
# Salida
return "El promedio ajustado del estudiante {} es: {}".format(codigo, promedio)
pass
# Para comprobar la funcion
print(nota_quices("AA0010276", 45, 78, 33, 81, 93))
|
Python
|
CL
|
7bc28b4a00ba5966e01d7df26924783077e114c4b02dd5234101bd89cf689347
|
#!/usr/bin/env python
#Import the pandas library and call it pd
import pandas as pd
#Read the 'workouts.csv' that we exported from Training Peaks & list the column names
df = pd.read_csv('workouts.csv')
list(df.columns)
#Print the contents of the column called Title
print(df['Title'])
#Print the title of the 50th workout in the export
print(df['Title'][49])
#Read the 'metrics.csv' that we exported from Training Peaks (contains HRV data, sleep data, fatigue, soreness etc.)
metrics_df = pd.read_csv('metrics.csv')
print(metrics_df)
#Reformat so that we have all of the different metric types as individual columns across the top and each day as a row.
#Initially trying it on just the HRV metrics
HRV = metrics_df[metrics_df.Type=='HRV']
HRV = HRV.set_index('Timestamp')
HRV = HRV.drop('Type', axis=1)
HRV = HRV.rename(columns={'Value':'HRV'})
print(HRV)
#After seeing that it works, we built a function to apply it to the other metrics
def build_series(metric):
new_df = metrics_df[metrics_df.Type==str(metric)]
new_df = new_df.set_index('Timestamp')
new_df = new_df.drop('Type', axis=1)
new_df = new_df.rename(columns={'Value':str(metric)})
return new_df
HRV = build_series('HRV')
pulse = build_series('Pulse')
stress = build_series('Stress')
sleep_quality = build_series('Sleep Qualilty')  # 'Qualilty' (sic) -- must match the Type value in metrics.csv exactly
sleep_hours = build_series('Sleep Hours')
mood = build_series('Mood')
new_improved_metrics = pd.concat([HRV, pulse, stress, sleep_quality, sleep_hours, mood], axis=1)
print(new_improved_metrics)
#Back to the workouts csv to sum up the total training load (TSS) for each day so that we can add it to the same df as our metrics
workouts_df = pd.read_csv('workouts.csv')
TSS_day = workouts_df.groupby(['WorkoutDay'])['TSS'].sum()
print(TSS_day)
#Changing the index on our metrics csv so that it matches the same index as the workouts.csv
new_improved_metrics['Date'] = new_improved_metrics.index
new_improved_metrics['Date'] = new_improved_metrics['Date'].astype(str)  # astype returns a copy, so assign it back
new_improved_metrics['Date'] = new_improved_metrics['Date'].str.slice(0,10)
print(new_improved_metrics['Date'])
new_improved_metrics = new_improved_metrics.set_index('Date')
#Joining the two dataframes together so we now have daily training load added to our metrics spreadsheet
new_improved_metrics = pd.concat([TSS_day, new_improved_metrics], axis=1)
print(new_improved_metrics)
#Visualizing what our dataframe fields consist of (type & count of data for each)
new_improved_metrics.info()
#Changing the data type from a string to a float (decimal number) so that we can perform some math on it.
metrics = ['HRV', 'Pulse', 'Stress', 'Sleep Qualilty', 'Sleep Hours', 'Mood']
for metric in metrics:
new_improved_metrics[metric] = new_improved_metrics[metric].astype(float)
new_improved_metrics.info()
#Performing some of that math - getting basic statistics on our data
new_improved_metrics.describe()
#Visualizing a frequency histogram for each of our data fields - on the lookout for weirdness/outliers.
import matplotlib.pyplot as plt
new_improved_metrics.hist(figsize=(20,15))
plt.show()
#Adding a new field that gets the training load from yesterday (as it is more likely to have an effect on our metrics for today)
new_improved_metrics['yesterday_TSS'] = new_improved_metrics['TSS'].shift(1)
print(new_improved_metrics)
#Visualizing the correlations between HRV and all of the other features
corr_matrix = new_improved_metrics.corr()
corr_matrix['HRV'].sort_values(ascending=False)
#Adding long term training load metrics - CTL, ATL, TSB to our dataframe.
import math
def calc_ctl(TSS:list, start_ctl, exponent):
    """Exponentially-weighted moving average of daily TSS; used for both CTL (42-day) and ATL (7-day)."""
    ctl = [start_ctl]
    for i in range(len(TSS)):
        ctl_value = TSS[i] * (1-math.exp(-1/exponent)) + ctl[-1] * math.exp(-1/exponent)
        ctl.append(ctl_value)
    return ctl
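# Sanity check of the smoothing above: starting from 0 under a constant load L,
# the value after n days follows L * (1 - exp(-n / exponent)). With L = 100 and
# the usual 42-day constant, calc_ctl([100] * 3, 0, 42) returns approximately
# [0, 2.4, 4.6, 6.9].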
ctl = calc_ctl(new_improved_metrics['TSS'], 103, 42)
atl = calc_ctl(new_improved_metrics['TSS'], 50, 7)
tsb = []
for i in range(len(ctl)):
tsb.append(ctl[i]-atl[i])
print(ctl)
print(atl)
print(tsb)
new_improved_metrics['ctl'] = ctl[1:]
new_improved_metrics['atl'] = atl[1:]
new_improved_metrics['tsb'] = tsb[1:]
print(new_improved_metrics)
#Visualizing statistics for these new metrics
new_improved_metrics.describe()
#Visualizing correlations for these new metrics
corr_matrix = new_improved_metrics.corr()
corr_matrix['HRV'].sort_values(ascending=False)
#Exporting our new, improved dataframe to a new improved csv so that we can apply the data to some machine learning in future posts.
new_improved_metrics.to_csv('new_improved_metrics.csv')
|
Python
|
CL
|
03880723d051f795481d5cbac46df5e0c1b82709f74cfc420a4b938abfa4d0af
|
import re
import sqlalchemy
import server.model.connection
def build_dicts(dim_table):
"""Returns dictionaries for cross-referencing ID fields to values.
Args:
dim_table: (str) Name of dimension table in scouting database.
Returns: A tuple containing two dictionaries. The keys of the first
dictionary are the values in the table's *name* column and the
values are the integer from the ID column for the same row. The
second dictionary has ID values for keys and the values are from
the *name* column.
"""
name_to_id = {}
id_to_name = {}
conn = server.model.connection.engine.connect()
if dim_table.lower() == "task_options":
sql = sqlalchemy.text("SELECT id, task_name||'-'||option_name "
"as name FROM task_options")
else:
sql = sqlalchemy.text("SELECT id, name FROM " + dim_table)
dim_res = conn.execute(sql)
for row in dim_res:
name_to_id[row["name"]] = row["id"]
id_to_name[row["id"]] = row["name"]
dim_res.close()
conn.close()
return id_to_name, name_to_id
date_names, date_ids = build_dicts("dates")
event_names, event_ids = build_dicts("events")
level_names, level_ids = build_dicts("levels")
match_names, match_ids = build_dicts("matches")
alliance_names, alliance_ids = build_dicts("alliances")
team_names, team_ids = build_dicts("teams")
station_names, station_ids = build_dicts("stations")
actor_names, actor_ids = build_dicts("actors")
task_names, task_ids = build_dicts("tasks")
measuretype_names, measuretype_ids = build_dicts("measuretypes")
phase_names, phase_ids = build_dicts("phases")
attempt_names, attempt_ids = build_dicts("attempts")
reason_names, reason_ids = build_dicts("reasons")
task_option_names, task_option_ids = build_dicts("task_options")
task_option_options = {key: re.sub(r"^[^-]+-", "", val, count=1)
for key, val in task_option_names.items()}
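# Illustration (hypothetical row): a task_options row with task_name "climb" and
# option_name "high" is keyed "climb-high" in task_option_names; the re.sub above
# strips everything up to and including the first "-", so task_option_options
# maps the same id to just "high".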
def rebuild_dicts():
global date_names, date_ids, event_names, event_ids
global level_names, level_ids, match_names, match_ids
global alliance_names, alliance_ids, team_names, team_ids
global station_names, station_ids, actor_names, actor_ids
global task_names, task_ids, measuretype_names, measuretype_ids
global phase_names, phase_ids, attempt_names, attempt_ids
global reason_names, reason_ids, task_option_names, task_option_ids
date_names, date_ids = build_dicts("dates")
event_names, event_ids = build_dicts("events")
level_names, level_ids = build_dicts("levels")
match_names, match_ids = build_dicts("matches")
alliance_names, alliance_ids = build_dicts("alliances")
team_names, team_ids = build_dicts("teams")
    station_names, station_ids = build_dicts("stations")
actor_names, actor_ids = build_dicts("actors")
task_names, task_ids = build_dicts("tasks")
measuretype_names, measuretype_ids = build_dicts("measuretypes")
phase_names, phase_ids = build_dicts("phases")
attempt_names, attempt_ids = build_dicts("attempts")
    reason_names, reason_ids = build_dicts("reasons")
task_option_names, task_option_ids = build_dicts("task_options")
|
Python
|
CL
|
973d631a6d426718dabf1233b8ff8ea65a3d51dd13ea65ac9839c4be4045ca6c
|
# coding: utf-8
"""
MessageMedia REST API
Australia’s Leading Messaging Solutions for Business and Enterprise.
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class DeliveryReport(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, delay=None, delivery_report_id=None, date_received=None, message_id=None, source_number=None, status=None, username=None):
"""
DeliveryReport - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'delay': 'int',
'delivery_report_id': 'str',
'date_received': 'datetime',
'message_id': 'str',
'source_number': 'str',
'status': 'str',
'username': 'str'
}
self.attribute_map = {
'delay': 'delay',
'delivery_report_id': 'delivery_report_id',
'date_received': 'date_received',
'message_id': 'message_id',
'source_number': 'source_number',
'status': 'status',
'username': 'username'
}
self._delay = delay
self._delivery_report_id = delivery_report_id
self._date_received = date_received
self._message_id = message_id
self._source_number = source_number
self._status = status
self._username = username
@property
def delay(self):
"""
Gets the delay of this DeliveryReport.
Not used
:return: The delay of this DeliveryReport.
:rtype: int
"""
return self._delay
@delay.setter
def delay(self, delay):
"""
Sets the delay of this DeliveryReport.
Not used
:param delay: The delay of this DeliveryReport.
:type: int
"""
self._delay = delay
@property
def delivery_report_id(self):
"""
Gets the delivery_report_id of this DeliveryReport.
Unique identifier of this delivery report.
:return: The delivery_report_id of this DeliveryReport.
:rtype: str
"""
return self._delivery_report_id
@delivery_report_id.setter
def delivery_report_id(self, delivery_report_id):
"""
Sets the delivery_report_id of this DeliveryReport.
Unique identifier of this delivery report.
:param delivery_report_id: The delivery_report_id of this DeliveryReport.
:type: str
"""
self._delivery_report_id = delivery_report_id
@property
def date_received(self):
"""
Gets the date_received of this DeliveryReport.
Date time when the delivery report was received in ISO8601 format.
:return: The date_received of this DeliveryReport.
:rtype: datetime
"""
return self._date_received
@date_received.setter
def date_received(self, date_received):
"""
Sets the date_received of this DeliveryReport.
Date time when the delivery report was received in ISO8601 format.
:param date_received: The date_received of this DeliveryReport.
:type: datetime
"""
self._date_received = date_received
@property
def message_id(self):
"""
Gets the message_id of this DeliveryReport.
Unique identifier of the message that this delivery report was matched to.
:return: The message_id of this DeliveryReport.
:rtype: str
"""
return self._message_id
@message_id.setter
def message_id(self, message_id):
"""
Sets the message_id of this DeliveryReport.
Unique identifier of the message that this delivery report was matched to.
:param message_id: The message_id of this DeliveryReport.
:type: str
"""
self._message_id = message_id
@property
def source_number(self):
"""
Gets the source_number of this DeliveryReport.
Address from which this delivery report was received.
:return: The source_number of this DeliveryReport.
:rtype: str
"""
return self._source_number
@source_number.setter
def source_number(self, source_number):
"""
Sets the source_number of this DeliveryReport.
Address from which this delivery report was received.
:param source_number: The source_number of this DeliveryReport.
:type: str
"""
if not source_number:
raise ValueError("Invalid value for `source_number`, must not be `None`")
if len(source_number) > 15:
raise ValueError("Invalid value for `source_number`, length must be less than `15`")
if len(source_number) < 1:
raise ValueError("Invalid value for `source_number`, length must be greater than or equal to `1`")
self._source_number = source_number
@property
def status(self):
"""
Gets the status of this DeliveryReport.
The status of the message as per the delivery report.
:return: The status of this DeliveryReport.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this DeliveryReport.
The status of the message as per the delivery report.
:param status: The status of this DeliveryReport.
:type: str
"""
self._status = status
@property
def username(self):
"""
Gets the username of this DeliveryReport.
Username that received this reply.
:return: The username of this DeliveryReport.
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""
Sets the username of this DeliveryReport.
Username that received this reply.
:param username: The username of this DeliveryReport.
:type: str
"""
self._username = username
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
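# Illustrative usage (all field values below are made up, not real API data):
#   report = DeliveryReport(delivery_report_id='report-123', message_id='msg-456',
#                           source_number='+61491570156', status='delivered')
#   print(report.to_dict())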
|
Python
|
CL
|
ab2f3b80ec1347c5af0696010b1a0b7674e11123c13ffd99bed9355494a81dd5
|
#!/usr/bin/env python3
from utility import *
def encode_pgm(msg, infile, outfile):
    '''LSB encodes a message
    Args:
        msg (bytes): bytes object to encode
        infile (str): name of the raw PGM file on disk to use as the cover
        outfile (str): name of the new PGM file to write
    Returns:
        None
    '''
    # Minimal sketch: assumes a binary (P5) PGM with a three-line header.
    with open(infile, 'rb') as f:
        header = [f.readline() for _ in range(3)]  # magic, dimensions, maxval
        pixels = bytearray(f.read())
    bits = [(b >> s) & 1 for b in msg for s in range(7, -1, -1)]
    if len(bits) > len(pixels):
        raise ValueError('message too large for this cover image')
    for i, bit in enumerate(bits):
        pixels[i] = (pixels[i] & 0xFE) | bit  # overwrite each pixel's LSB
    with open(outfile, 'wb') as f:
        f.write(b''.join(header) + bytes(pixels))
def decode_pgm(infile):
    '''LSB decodes a message
    Args:
        infile (str): name of the PGM file to read/decode
    Returns:
        bytes: message that was decoded from the PGM file
    '''
    # Counterpart sketch: rebuilds one byte from every eight pixel LSBs; the
    # caller is assumed to know where the real message ends.
    with open(infile, 'rb') as f:
        pixels = f.read().split(b'\n', 3)[3]  # skip the three header lines
    out = bytearray()
    for chunk in zip(*[iter(p & 1 for p in pixels)] * 8):
        out.append(sum(bit << (7 - j) for j, bit in enumerate(chunk)))
    return bytes(out)
if __name__ == '__main__':
    # Round-trip smoke test (file names are placeholders):
    # encode_pgm(b'hello', 'cover.pgm', 'stego.pgm'); print(decode_pgm('stego.pgm')[:5])
    pass
|
Python
|
CL
|
88a0628633741be6ba5b6da6ad0e6b273b609910a7e4b559451b646060c8ca20
|
from django.urls import include, path
from rest_framework import routers
from . import views
from rest_framework.authtoken.views import obtain_auth_token
router = routers.DefaultRouter()
router.register(r'sms', views.SmViewSet)
router.register(r'devices', views.DeviceViewSet)
router.register(r'college', views.CollegeViewSet)
router.register(r'meta', views.MetaViewSet)
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
path('', include(router.urls)),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework')),
path('hey/', views.hey),
path('api-token-auth/', obtain_auth_token, name='api_token_auth'),
path('fcmPush/', views.fcmPush),
]
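# With the DefaultRouter registrations above, DRF exposes (relative to
# wherever this module is included) list and detail routes such as:
#   sms/       sms/<pk>/
#   devices/   devices/<pk>/
#   college/   college/<pk>/
#   meta/      meta/<pk>/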
|
Python
|
CL
|
c85c405c6c496590ac9eae40e644dceac5105038aff9b2976a278a3c4619572e
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
import copy
import json
import logging
import requests
import time
import aiohttp
from queue import Queue, Empty as QueueEmptyError
from aiohttp import ClientSession
from hello_proxy_server.proxy import Proxy
from hello_proxy_server.settings import *
__CURRENT_IP__ = None
logger = logging.getLogger(__name__)
def get_current_ip():
global __CURRENT_IP__
if __CURRENT_IP__:
return __CURRENT_IP__
else:
r = requests.get(IP_CHECKER_API).json()
__CURRENT_IP__ = r['ip']
return __CURRENT_IP__
class Validator:
    """Periodically re-validates pooled proxies against IP_CHECKER_API and
    prunes over-used or stale entries from the shared proxy dictionaries."""
def __init__(self, task_queue: Queue, proxy_dict: dict, spider_name_proxy_dict: dict):
self.task_queue = task_queue
self.proxy_dict = proxy_dict
self.spider_name_proxy_dict = spider_name_proxy_dict
self.running = True
self.client_session = None
self.time = int(time.time())
def stop(self):
self.running = False
async def run(self):
self.client_session = self.gen_session()
async with self.client_session as session:
while self.running:
now = int(time.time())
if now - self.time > 45:
for ip, proxy in copy.deepcopy(self.proxy_dict).items():
if proxy.used_times >= 20:
logger.warning(f'{proxy.proxy_url} used_times >= 20, remove it!')
self.proxy_dict.pop(ip, '')
elif now - proxy.validate_time > 45:
logger.info('re_validate ' + proxy.ip)
# self.proxy_dict.pop(ip, '')
self.task_queue.put(proxy)
self.time = int(time.time())
if self.task_queue.empty():
await asyncio.sleep(2)
continue
proxy_list = []
for i in range(DEFAULT_CONCURRENT_VALIDATE):
try:
proxy_list.append(self.task_queue.get_nowait())
except QueueEmptyError:
break
task_list = [self.validate_proxy(session, p) for p in proxy_list]
await asyncio.gather(*task_list)
@staticmethod
def gen_session():
return ClientSession()
@staticmethod
def gen_proxy(proxy: Proxy):
proxy_url = proxy.proxy_url.replace('https', 'http')
user, pwd = proxy.user, proxy.pwd
proxy_auth = aiohttp.BasicAuth(user, pwd) if user and pwd else None
return proxy_url, proxy_auth
async def validate_proxy(self, session: ClientSession, proxy: Proxy):
proxy_url, proxy_auth = self.gen_proxy(proxy)
try:
async with session.get(IP_CHECKER_API, proxy=proxy_url,
proxy_auth=proxy_auth, timeout=10) as response:
body = await response.read()
if response.status == 200:
if json.loads(body)['ip'] != get_current_ip():
proxy.anonymous = True
proxy.validate_time = int(time.time())
proxy.used_times += 1
self.proxy_dict[proxy.ip] = proxy
                    logger.info(f'[{proxy.source}]>{proxy_url} validated successfully')
for proxy_dict in self.spider_name_proxy_dict.values():
proxy_dict.setdefault(proxy.ip, 0)
return proxy
except Exception:
            logger.warning(f'[{proxy.source}]>{proxy_url} failed validation')
for proxy_dict in self.spider_name_proxy_dict.values():
proxy_dict.pop(proxy.ip, '')
self.proxy_dict.pop(proxy.ip, '')
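# Sketch of how a caller might drive the validator (the queue and dicts are
# normally shared with the rest of hello_proxy_server; values shown here are
# only illustrative):
#   task_queue = Queue()
#   validator = Validator(task_queue, {}, {})
#   asyncio.get_event_loop().run_until_complete(validator.run())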
|
Python
|
CL
|
4358dc3b29f38b4d839c4212983cd5f209432d6c0913a38eea0e5ba02ef97754
|
# Importing PyQt5 library to construct widgets for Graphic User Interface (GUI) application
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import (QLineEdit, QPushButton, QVBoxLayout, QHBoxLayout,
QWidget, QLabel,QMainWindow, QTableWidgetItem, QTableWidget, QMenu, QMessageBox)
from functools import partial
import shlex
from PyQt5.QtCore import Qt, pyqtSignal
class ComparisonUI(QMainWindow):
"""
Creates the comparison window that will be opened when the user clicks the compare button.
Purpose: Used to create plots using multiple trend reports from different imported spreadsheets
The user will type in names of the data tables and their corresponding row numbers which they wish
to plot from.
"""
MultiBoxSignal = pyqtSignal(list, list, str)
def __init__(self):
super(QMainWindow, self).__init__()
self.setWindowTitle("Perspective - MultiGraph Window")
# initialize Compare User Interface
self.CompareUI()
def CompareUI(self):
print("initialize widgets")
# Creates the mainwidget to hold/display all other widgets on
self.main_widget = QWidget(self)
self.setCentralWidget(self.main_widget)
# Creating descriptor labels
self.TableHeader = QLabel("Tables")
self.RowHeader = QLabel("Row Selection")
        ###MUST FOLLOW FORMAT EXACTLY### Table name must be a string, no symbols
        # input is always read as a string; there are currently no plans to convert integer table names
        self.Instructions = QLabel(
            'Input Format is as Follows... "Table Name", "row, numbers, separated, with, commas" ')
        """
        These aren't QTableWidgets; they are QLineEdits that ask the user to input which table and which rows they
        want to pull data from. Quotations are necessary.
        *Quotations let shlex.split() group the quoted substrings and store them in a list
        """
self.TableRowEntry1 = QLineEdit(' "Table", "Rows Numbers" ')
self.TableRowEntry2 = QLineEdit(' "Table", "Rows Numbers" ')
self.TableRowEntry3 = QLineEdit(' "Table", "Rows Numbers" ')
self.TableRowEntry4 = QLineEdit(' "Table", "Rows Numbers" ')
self.TableRowEntry5 = QLineEdit(' "Table", "Rows Numbers" ')
##Create buttons for box and scatter plot
self.BoxPlot = QPushButton("Box Plot")
self.ScatterPlot = QPushButton("Scatter Plot")
"""
This is prob not going to work Overload???
does the on enter need to be for every btn????
"""
self.TableRowEntry1.returnPressed.connect(self.BoxPlot.click)
self.TableRowEntry2.returnPressed.connect(self.BoxPlot.click)
self.TableRowEntry3.returnPressed.connect(self.BoxPlot.click)
self.TableRowEntry4.returnPressed.connect(self.BoxPlot.click)
self.TableRowEntry5.returnPressed.connect(self.BoxPlot.click)
# <editor-fold desc="Error msg feedback">
###WORK IN PROGRESS: Error msgs and error handling###
self.ErrorMsg = QMessageBox()
self.ErrorMsg.setText("Nothing for now")
self.ErrorMsg.setInformativeText("This is additional information")
self.ErrorMsg.setWindowTitle("Error Message")
self.ErrorMsg.setDetailedText("The details are as follows:")
# </editor-fold>
# Initial values (TEST VALUES)
self.Label = QLabel("Empty right now")
self.text = "initial string"
"""
Currently these values are just the strings from default setting of the QlineEdit
.text() grabs whatever text is inside the QLineEdits, which right now is ' "Table", "Rows Numbers" '
"""
self.Entry1 = self.TableRowEntry1.text()
self.Entry2 = self.TableRowEntry2.text()
self.Entry3 = self.TableRowEntry3.text()
self.Entry4 = self.TableRowEntry4.text()
self.Entry5 = self.TableRowEntry5.text()
"""
Checks to see if the text in the QlineEdit has changed and if it has it passes the arguments a string, and a
number representing which QlineEdit was modified, 1-5 descending order.
partial allows for the passing of multiple arguments through, what is normally only one
"""
self.TableRowEntry1.textChanged[str].connect(partial(self.onChanged, entryNum=1))
self.TableRowEntry2.textChanged[str].connect(partial(self.onChanged, entryNum=2))
self.TableRowEntry3.textChanged[str].connect(partial(self.onChanged, entryNum=3))
self.TableRowEntry4.textChanged[str].connect(partial(self.onChanged, entryNum=4))
self.TableRowEntry5.textChanged[str].connect(partial(self.onChanged, entryNum=5))
print("About to call plotting functions")
###Takes the Entrys that have been made connects it my my methods Box/ScatterPlotCall###
self.BoxPlot.clicked.connect(self.BoxPlotCall)
self.ScatterPlot.clicked.connect(self.ScatterPlotCall)
###LAYOUT###
# <editor-fold desc="Layout">
self.vboxMain = QVBoxLayout(self.main_widget)
self.VboxCOMP1 = QVBoxLayout()
self.vboxMain.addLayout(self.VboxCOMP1)
self.Header = QHBoxLayout()
self.Header.addWidget(self.TableHeader)
self.Header.addWidget(self.RowHeader)
self.subHeader = QHBoxLayout()
self.subHeader.addStretch()
self.subHeader.addWidget(self.Instructions)
self.hboxCOMP1 = QHBoxLayout()
# self.hboxCOMP1.addStretch()
self.hboxCOMP1.addWidget(self.TableRowEntry1)
self.hboxCOMP1.addStretch()
self.hboxCOMP2 = QHBoxLayout()
# self.hboxCOMP2.addStretch()
self.hboxCOMP2.addWidget(self.TableRowEntry2)
self.hboxCOMP2.addStretch()
self.hboxCOMP3 = QHBoxLayout()
# self.hboxCOMP3.addStretch()
self.hboxCOMP3.addWidget(self.TableRowEntry3)
self.hboxCOMP3.addStretch()
self.hboxCOMP4 = QHBoxLayout()
# self.hboxCOMP3.addStretch()
self.hboxCOMP4.addWidget(self.TableRowEntry4)
self.hboxCOMP4.addStretch()
self.hboxCOMP5 = QHBoxLayout()
# self.hboxCOMP3.addStretch()
self.hboxCOMP5.addWidget(self.TableRowEntry5)
self.hboxCOMP5.addStretch()
self.hboxPlotBtns = QHBoxLayout()
self.hboxPlotBtns.addWidget(self.BoxPlot)
self.hboxPlotBtns.addWidget(self.ScatterPlot)
self.hboxPlotBtns.addStretch()
self.hboxRAND = QHBoxLayout()
self.hboxRAND.addWidget(self.Label)
### ADD TO PAGE ###
self.VboxCOMP1.addLayout(self.Header)
self.VboxCOMP1.addLayout(self.subHeader)
self.VboxCOMP1.addLayout(self.hboxCOMP1)
self.VboxCOMP1.addLayout(self.hboxCOMP2)
self.VboxCOMP1.addLayout(self.hboxCOMP3)
self.VboxCOMP1.addLayout(self.hboxCOMP4)
self.VboxCOMP1.addLayout(self.hboxCOMP5)
self.VboxCOMP1.addLayout(self.hboxPlotBtns)
# self.VboxCOMP1.addLayout(self.hboxRAND)
# </editor-fold>
def onChanged(self, text, entryNum):
"""
        Purpose: checks which QLineEdit was modified using the QLineEdit number,
        then stores the text in that LineEdit as the user's entry text
"""
if entryNum == 1:
self.text = text
userIN = str(self.text)
self.Entry1 = userIN
elif entryNum == 2:
self.text = text
userIN = str(self.text)
self.Entry2 = userIN
elif entryNum == 3:
self.text = text
userIN = str(self.text)
self.Entry3 = userIN
elif entryNum == 4:
self.text = text
userIN = str(self.text)
self.Entry4 = userIN
elif entryNum == 5:
self.text = text
userIN = str(self.text)
self.Entry5 = userIN
else:
print("something was passed")
pass
        # List with all possible entries, currently capped at 5;
        # possibly problematic if future iterations want to include more entries
        ###Create a list containing all entries the user has input###
self.ALLentries = [self.Entry1, self.Entry2, self.Entry3, self.Entry4, self.Entry5]
def CheckEntries(self):
"""
        Purpose
        Check which entries have been modified; a flag is set True if user input is detected.
        The check is run by comparing the current state (string) to the default string, which is...
        "Table", "Rows Numbers"
        QUOTATIONS ARE NECESSARY in order to parse user input later on with shlex.split()
"""
if self.Entry1 == ' "Table", "Rows Numbers" ':
self.userInput = False
else:
self.userInput = True
if self.Entry2 == ' "Table", "Rows Numbers" ':
self.userInput2 = False
else:
self.userInput2 = True
if self.Entry3 == ' "Table", "Rows Numbers" ':
self.userInput3 = False
else:
self.userInput3 = True
if self.Entry4 == ' "Table", "Rows Numbers" ':
self.userInput4 = False
else:
self.userInput4 = True
if self.Entry5 == ' "Table", "Rows Numbers" ':
self.userInput5 = False
else:
self.userInput5 = True
def BoxPlotCall(self):
        # runs method CheckEntries to see which LineEdits were actually modified, returning booleans to expedite the next process
        self.CheckEntries()
        # List of truth values showing which lineEdits the user has changed
        # (most likely this belongs in the CheckEntries method)
        UserInputList = [self.userInput, self.userInput2, self.userInput3, self.userInput4, self.userInput5]
        # List to save the indexes of the tables whose QLineEdits have changed
MutableList = []
"""
Enumerate goes through a list and creates an order pair with the item and its index.
For example the list [apple, grape, berry], once enumerated, becomes [(0, apple),(1, grape),(2, berry)]
"""
UserInputList = list(enumerate(UserInputList))
print("enumerated userinput list")
print(UserInputList)
###For loop runs through our list of booleans and checks to see which ones are True ('modifed from user inputting something')
for index, item in enumerate(UserInputList, start=0):
print("item is .... {}".format(item))
# print([item[1] for i in UserInputList])
# checks the ordered pair and look at the second item in the pair to check the boolean
if item[1] == True:
print(item)
# appends the index for that item to a list to be referenced later on
MutableList.append(index)
###Print Statements for debugging###
print("printing index ... {}".format(index))
print("printing mutablelist ... ")
print(MutableList)
else:
pass
# Want to call for all tables that have been changed
# We have the numbers now we want to attach those to our variable and call it as such
print("Parsing is about to begin")
        ParsedList = []  # initialize a list to store all parsed strings from the QLineEdits
        self.RowNumList = []  # initialize a list to store Row numbers
        self.TableList = []  # initialize a list to store Table Names
        """
        For loop parses each string and stores the parsed pieces into two separate lists:
        one list for row numbers and the other for the table names
        """
i = 0
for i in range(len(MutableList)):
print("mutable list is as follows...")
print(MutableList)
print("Inisde parse loop, i value is {}".format(i))
print(self.ALLentries[MutableList[i]])
"""
            shlex.split() is a unique parser in that it separates based on quotations (" ")
            This is used to separate the Table Names and Row Numbers, since both are surrounded by quotations
"""
ParsedString = shlex.split(self.ALLentries[MutableList[i]])
print("parsed string is ..... {}".format(ParsedString))
print(type(ParsedString))
# attach parsed string to our list
ParsedList.append(ParsedString)
print(ParsedList)
print("second element of string ... {}".format(ParsedList[i][1]))
###Selecting the row numbers###_____________________________________________________________________________
            # further split the nested list using the fact that each item is separated with a comma
RowVals = ParsedList[i][1].split(',')
print("ROWS TO BE SELECTED are as follows...")
print(RowVals)
print("right before mapping")
# convert the strings into integers
RowVals = list(map(int, RowVals))
print("Row Values after mapping has taken place")
print(RowVals)
self.RowNumList.append(RowVals)
print(self.RowNumList)
# DEBUGGING STATEMENTS TO CHECK OUTPUT
# print(self.RowNumList)
# print(type(self.RowNumList[0]))
###Selecting the Table Number###____________________________________________________________________________
            # Methodology is similar to the RowVal collection above; it follows almost the same process
print("Right before appending things to Table List")
TableEntry = ParsedList[i][0].split(',')
print("Table list is as follows")
print(TableEntry)
print(type(TableEntry[0]))
print("first element of table entry is .... {}".format(TableEntry[0]))
"""
Error Handling depending on user input if the table name is a number vs a string we have to
approach the problem differently. First checks to see if the string can be converted to an integer else
it just continues on as a string.
POSSIBLE BUG: if it can be converted to integer might skip all below code and just end. ErrorHandling W.I.P.
"""
TableVal = (TableEntry[0])
self.TableList.append(TableVal)
print("current counter is ............... {}".format(i))
# string value so we know when we call plot to do a boxplot
self.PlotCompareVal = "box"
###output check###
print("Parsedstring list is here .......... {}".format(ParsedList))
print("Row numbers list is as follows.... {}".format(self.RowNumList))
print("Table List is as follows....{}".format(self.TableList))
print(self.TableList)
# This is the data for which Table Names and which Rows need to be plotted
print(type(self.RowNumList))
print(type(self.TableList))
        ###SIGNAL EMISSION W.I.P.
self.MultiBoxSignal.emit(self.TableList, self.RowNumList, self.PlotCompareVal)
def ScatterPlotCall(self):
print("scatter plot called")
### W.I.P###
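# Worked example of the parsing in BoxPlotCall (table name is hypothetical):
# entering
#   "Metrics", "1, 3, 5"
# in an entry field makes shlex.split() yield ['Metrics,', '1, 3, 5'];
# splitting those pieces on commas gives TableList == ['Metrics'] and
# RowNumList == [[1, 3, 5]], which are emitted via MultiBoxSignal with "box".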
|
Python
|
CL
|
02025d669917c1937ecf31a97327e13368ce89d8ddf266b32f40b980a0c87e18
|
#!/usr/bin/python
# This program runs the cfmid docker image to generate output. It uses
# multiprocessing to start several worker processes that call the cfmid
# docker image and collect results concurrently until all the jobs in
# the input file are done.
import os
import sys
import config
from util import get_args, split_input_file
import re
import subprocess
from multiprocessing import Pool
import time
def jobs(argvs):
file = argvs[0]
der_arg = argvs[1]
done_file = file + ".done"
done_list = []
if os.path.isfile(done_file):
with open(done_file, 'r') as df:
done_list = df.read().split("\n")
#done_list = set(done_list)
if not der_arg:
case_dir = config.case_dir
dir_back_to_host = config.dir_back_to_host
out_dir = config.out_dir
else:
case_dir = config.case_dir_d
dir_back_to_host = config.dir_back_to_host_d
out_dir = config.out_dir_d
with open(file, "r") as f:
for line in f:
if not der_arg:
m = re.search("^(\S*)\t(\S*)", line)
hmdb_id = m.group(1)
smile = m.group(2)
else:
m = re.search("^(\S*)\t(\S+)\t(\S+)\t(\S+)", line)
hmdb_id = m.group(1)
smile = m.group(4)
deri = m.group(2)
cnt = m.group(3)
if hmdb_id in done_list:
continue
for c in case_dir:
if not der_arg:
remote_file = "{0}/{1}/{2}*".format(dir_back_to_host, c, hmdb_id)
else:
remote_file = "{0}/{1}/{2}_{3}_{4}*".format(dir_back_to_host, c, \
hmdb_id, deri, cnt)
cm1 = "ssh centos_1 \"ls {0}\"".format(remote_file)
count = 0
read_out = ''
while(1):
count += 1
rp = subprocess.Popen(cm1, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
rp.wait()
read_out = rp.stdout.read()
if read_out:
break
if count == 3:
break
time.sleep(3)
if read_out:
#print >> sys.stderr, "Bypass " + remote_file
continue
#print >> sys.stderr, "Run " + remote_file
if c == 'positive':
param_file = "/root/param_output0.log"
config_file = "/root/param_config.txt"
elif c == 'negative':
param_file = "/root/negative_param_output0.log"
config_file = "/root/negative_param_config.txt"
elif c == 'ei':
param_file = "/root/ei_param_output.log"
config_file = "/root/ei_param_config.txt"
if not der_arg:
out_file = "{0}.log".format(hmdb_id)
else:
out_file = "{0}_{1}_{2}.log".format(hmdb_id, deri, cnt)
cmd = ("docker run --rm=true -v {0}:/root " + ""
"-i cfmid:latest sh -c \"cd /root/; cfm-predict " +
"'{1}' 0.001 {2} {3} " +
"1 /root/{4}/{5}; chmod 777 /root/{4}/{5}\" ").format(out_dir, smile, \
param_file, config_file, c, \
out_file)
                x = config.timeout * 60  # total per-job budget in seconds
                delay = 3
                timeout = x  # counted down in seconds, `delay` at a time
output_file = "{0}/{1}/{2}".format(out_dir, c, out_file)
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, \
stderr=subprocess.STDOUT, shell=True)
while p.poll() is None and timeout > 0:
time.sleep(delay)
if os.path.isfile(output_file):
timeout = 0
else:
timeout -= delay
#p.wait()
                if timeout <= 0:
p.kill()
                    # the above only kills 'docker run'; the below kills the exec inside the docker container.
cmd = "ps aux|grep '.:.. cfm-'|grep {0}|tr -s ' '|cut -d ' ' -f 2|xargs sudo kill -9 ".format(hmdb_id)
p1 = subprocess.Popen(cmd, stdout=subprocess.PIPE, \
stderr=subprocess.STDOUT, shell=True)
p1.wait()
file_to_master_host = ''
# if failed, generate fail file
if not os.path.isfile(output_file) or os.stat(output_file).st_size == 0:
if not der_arg:
fail_file = "{0}/{1}/{2}".format(out_dir, c, "{0}.fail".format(hmdb_id))
else:
fail_file = "{0}/{1}/{2}".format(out_dir, c, "{0}_{1}_{2}.fail".format(hmdb_id, deri, cnt))
with open(fail_file, "w") as ff:
ff.write(cmd + "\n")
                        if timeout <= 0:
ff.write("Time out {0} minutes!\n".format(config.timeout))
else:
ff.write(p.stdout.read() + "\n")
file_to_master_host = fail_file
else:
file_to_master_host = output_file
# transfer back to master host
cm = "scp {0} centos_1:{1}/{2}/.".format(file_to_master_host, dir_back_to_host, c)
                cn = 0
                while cn < 3:
                    p = subprocess.Popen(cm, stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT, shell=True)
                    p.wait()
                    cn += 1
                    if p.returncode == 0:  # stop retrying once the copy succeeds
                        break
# delete the result file
os.remove(file_to_master_host)
#write done file
if not hmdb_id in done_list:
with open(done_file, "a") as df:
df.write(hmdb_id + "\n")
def run_jobs(input_file, pieces, der_arg):
argvs = []
for i in range(1, pieces+1):
file = "{0}_{1}".format(input_file, i)
argvs.append((file, der_arg))
p = Pool(pieces)
print argvs
p.map(jobs, argvs)
p.close()
p.join()
def main():
der_arg, input_file = get_args("run_job_each_host.py")
print "python ../bin/run_job_each_host.py {0} {1}".format(input_file, der_arg)
pieces = split_input_file(input_file, config.pieces_in_each_host)
print "Pieces {0}".format(pieces)
run_jobs(input_file, pieces, der_arg)
print "Program exit!"
if __name__ == "__main__":
main()
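# The imported config module is assumed (not shown here) to define:
#   case_dir / case_dir_d                  - lists of result sub-directories,
#                                            e.g. ['positive', 'negative', 'ei']
#   out_dir / out_dir_d                    - local output directories
#   dir_back_to_host / dir_back_to_host_d  - result directories on centos_1
#   timeout                                - per-job limit in minutes
#   pieces_in_each_host                    - worker count for the Pool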
|
Python
|
CL
|
8b1e89069e4f40038db3192e926a269d0f77202758a4f33cd18114eeaa38c0ca
|
#!/usr/bin/env python3
import urllib, json, sys, os
import requests # 'pip install requests'
import boto3 # AWS SDK for Python (Boto3) 'pip install boto3'
# Step 1: Authenticate user in your own identity system.
# Step 2: Using the access keys for an IAM user in your AWS account,
# call "AssumeRole" to get temporary access keys for the federated user.
# (This script assumes the resulting temporary credentials have already been
# exported to the environment variables read in Step 3 below.)
# Note: Calls to AWS STS AssumeRole must be signed using the access key ID
# and secret access key of an IAM user or using existing temporary credentials.
# The credentials can be in EC2 instance metadata, in environment variables,
# or in a configuration file, and will be discovered automatically by the
# client('sts') function. For more information, see the Python SDK docs:
# http://boto3.readthedocs.io/en/latest/reference/services/sts.html
# http://boto3.readthedocs.io/en/latest/reference/services/sts.html#STS.Client.assume_role
sts_connection = boto3.client('sts')
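# For example, with a role ARN from your account (illustrative values), fresh
# temporary credentials could be fetched instead of relying on the environment:
# assumed = sts_connection.assume_role(
#     RoleArn="arn:aws:iam::123456789012:role/FederatedUserRole",
#     RoleSessionName="console-session")
# credentials = assumed["Credentials"]  # AccessKeyId/SecretAccessKey/SessionToken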
# Step 3: Format resulting temporary credentials into JSON
url_credentials = {}
url_credentials['sessionId'] = os.getenv("AWS_ACCESS_KEY_ID")
url_credentials['sessionKey'] = os.getenv("AWS_SECRET_ACCESS_KEY")
url_credentials['sessionToken'] = os.getenv("AWS_SESSION_TOKEN")
json_string_with_temp_credentials = json.dumps(url_credentials)
# Step 4. Make request to AWS federation endpoint to get sign-in token. Construct the parameter string with
# the sign-in action request, a 12-hour session duration, and the JSON document with temporary credentials
# as parameters.
request_parameters = "?Action=getSigninToken"
request_parameters += "&SessionDuration=43200"
if sys.version_info[0] < 3:
    def quote_plus_function(s):
        return urllib.quote_plus(s)
else:
    import urllib.parse  # a bare "import urllib" does not expose urllib.parse
    def quote_plus_function(s):
        return urllib.parse.quote_plus(s)
request_parameters += "&Session=" + quote_plus_function(json_string_with_temp_credentials)
request_url = "https://signin.aws.amazon.com/federation" + request_parameters
r = requests.get(request_url)
# Returns a JSON document with a single element named SigninToken.
signin_token = json.loads(r.text)
# Step 5: Create URL where users can use the sign-in token to sign in to
# the console. This URL must be used within 15 minutes after the
# sign-in token was issued.
request_parameters = "?Action=login"
request_parameters += "&Issuer=Example.org"
request_parameters += "&Destination=" + quote_plus_function("https://console.aws.amazon.com/")
request_parameters += "&SigninToken=" + signin_token["SigninToken"]
request_url = "https://signin.aws.amazon.com/federation" + request_parameters
os.execlp("open", "open", request_url)
|
Python
|
CL
|
2b614f5b667c1cb4c00c622997c3bc44afbf79d1059ab494460819e192ccd7d4
|
#!/usr/bin/python
# Copyright: 2017, CCX Technologies
import ctypes
import socket
import fcntl
# Generic MII registers.
MII_BMCR = 0x00 # Basic mode control register
MII_BMSR = 0x01 # Basic mode status register
MII_PHYSID1 = 0x02 # PHYS ID 1
MII_PHYSID2 = 0x03 # PHYS ID 2
MII_ADVERTISE = 0x04 # Advertisement control reg
MII_LPA = 0x05 # Link partner ability reg
MII_EXPANSION = 0x06 # Expansion register
MII_CTRL1000 = 0x09 # 1000BASE-T control
MII_STAT1000 = 0x0a # 1000BASE-T status
MII_MMD_CTRL = 0x0d # MMD Access Control Register
MII_MMD_DATA = 0x0e # MMD Access Data Register
MII_ESTATUS = 0x0f # Extended Status
MII_DCOUNTER = 0x12 # Disconnect counter
MII_FCSCOUNTER = 0x13 # False carrier counter
MII_NWAYTEST = 0x14 # N-way auto-neg test reg
MII_RERRCOUNTER = 0x15 # Receive error counter
MII_SREVISION = 0x16 # Silicon revision
MII_RESV1 = 0x17 # Reserved...
MII_LBRERROR = 0x18 # Loopback, rx, bypass error
MII_PHYADDR = 0x19 # PHY address
MII_RESV2 = 0x1a # Reserved...
MII_TPISTATUS = 0x1b # TPI status for 10Mbps
MII_NCONFIG = 0x1c # Network interface config
# Basic mode control register.
BMCR_RESV = 0x003f # Unused...
BMCR_SPEED1000 = 0x0040 # MSB of Speed (1000)
BMCR_CTST = 0x0080 # Collision test
BMCR_FULLDPLX = 0x0100 # Full duplex
BMCR_ANRESTART = 0x0200 # Auto negotiation restart
BMCR_ISOLATE = 0x0400 # Isolate data paths from MII
BMCR_PDOWN = 0x0800 # Enable low power state
BMCR_ANENABLE = 0x1000 # Enable auto negotiation
BMCR_SPEED100 = 0x2000 # Select 100Mbps
BMCR_LOOPBACK = 0x4000 # TXD loopback bits
BMCR_RESET = 0x8000 # Reset to default state
# Basic mode status register.
BMSR_ERCAP = 0x0001 # Ext-reg capability
BMSR_JCD = 0x0002 # Jabber detected
BMSR_LSTATUS = 0x0004 # Link status
BMSR_ANEGCAPABLE = 0x0008 # Able to do auto-negotiation
BMSR_RFAULT = 0x0010 # Remote fault detected
BMSR_ANEGCOMPLETE = 0x0020 # Auto-negotiation complete
BMSR_RESV = 0x00c0 # Unused...
BMSR_ESTATEN = 0x0100 # Extended Status in R15
BMSR_100HALF2 = 0x0200 # Can do 100BASE-T2 HDX
BMSR_100FULL2 = 0x0400 # Can do 100BASE-T2 FDX
BMSR_10HALF = 0x0800 # Can do 10Mbps, half-duplex
BMSR_10FULL = 0x1000 # Can do 10Mbps, full-duplex
BMSR_100HALF = 0x2000 # Can do 100Mbps, half-duplex
BMSR_100FULL = 0x4000 # Can do 100Mbps, full-duplex
BMSR_100BASE4 = 0x8000 # Can do 100Mbps, 4k packets
# Advertisement control register.
ADVERTISE_SLCT = 0x001f # Selector bits
ADVERTISE_CSMA = 0x0001 # Only selector supported
ADVERTISE_10HALF = 0x0020 # Try for 10Mbps half-duplex
ADVERTISE_1000XFULL = 0x0020 # Try for 1000BASE-X full-duplex
ADVERTISE_10FULL = 0x0040 # Try for 10Mbps full-duplex
ADVERTISE_1000XHALF = 0x0040 # Try for 1000BASE-X half-duplex
ADVERTISE_100HALF = 0x0080 # Try for 100Mbps half-duplex
ADVERTISE_1000XPAUSE = 0x0080 # Try for 1000BASE-X pause
ADVERTISE_100FULL = 0x0100 # Try for 100Mbps full-duplex
ADVERTISE_1000XPSE_ASYM = 0x0100 # Try for 1000BASE-X asymmetric pause
ADVERTISE_100BASE4 = 0x0200 # Try for 100Mbps 4k packets
ADVERTISE_PAUSE_CAP = 0x0400 # Try for pause
ADVERTISE_PAUSE_ASYM = 0x0800 # Try for asymmetric pause
ADVERTISE_RESV = 0x1000 # Unused...
ADVERTISE_RFAULT = 0x2000 # Say we can detect faults
ADVERTISE_LPACK = 0x4000 # ACK link partners response
ADVERTISE_NPAGE = 0x8000 # Next page bit
ADVERTISE_FULL = (ADVERTISE_100FULL | ADVERTISE_10FULL | ADVERTISE_CSMA)
ADVERTISE_ALL = (
ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF
| ADVERTISE_100FULL
)
# Link partner ability register.
LPA_SLCT = 0x001f # Same as advertise selector
LPA_10HALF = 0x0020 # Can do 10Mbps half-duplex
LPA_1000XFULL = 0x0020 # Can do 1000BASE-X full-duplex
LPA_10FULL = 0x0040 # Can do 10Mbps full-duplex
LPA_1000XHALF = 0x0040 # Can do 1000BASE-X half-duplex
LPA_100HALF = 0x0080 # Can do 100Mbps half-duplex
LPA_1000XPAUSE = 0x0080 # Can do 1000BASE-X pause
LPA_100FULL = 0x0100 # Can do 100Mbps full-duplex
LPA_1000XPAUSE_ASYM = 0x0100 # Can do 1000BASE-X pause asymmetric
LPA_100BASE4 = 0x0200 # Can do 100Mbps 4k packets
LPA_PAUSE_CAP = 0x0400 # Can pause
LPA_PAUSE_ASYM = 0x0800 # Can pause asymmetrically
LPA_RESV = 0x1000 # Unused...
LPA_RFAULT = 0x2000 # Link partner faulted
LPA_LPACK = 0x4000 # Link partner ACKed us
LPA_NPAGE = 0x8000 # Next page bit
LPA_DUPLEX = (LPA_10FULL | LPA_100FULL)
LPA_100 = (LPA_100FULL | LPA_100HALF | LPA_100BASE4)
# Expansion register for auto-negotiation.
EXPANSION_NWAY = 0x0001 # Can do N-way auto-negotiation
EXPANSION_LCWP = 0x0002 # Got new RX page code word
EXPANSION_ENABLENPAGE = 0x0004 # This enables npage words
EXPANSION_NPCAPABLE = 0x0008 # Link partner supports npage
EXPANSION_MFAULTS = 0x0010 # Multiple faults detected
EXPANSION_RESV = 0xffe0 # Unused...
ESTATUS_1000_TFULL = 0x2000 # Can do 1000baseT Full
ESTATUS_1000_THALF = 0x1000 # Can do 1000baseT Half
# N-way test register.
NWAYTEST_RESV1 = 0x00ff # Unused...
NWAYTEST_LOOPBACK = 0x0100 # Enable loopback for N-way
NWAYTEST_RESV2 = 0xfe00 # Unused...
# 1000BASE-T Control register
ADVERTISE_1000FULL = 0x0200 # Advertise 1000BASE-T full duplex
ADVERTISE_1000HALF = 0x0100 # Advertise 1000BASE-T half duplex
CTL1000_AS_MASTER = 0x0800
CTL1000_ENABLE_MASTER = 0x1000
# 1000BASE-T Status register
LPA_1000LOCALRXOK = 0x2000 # Link partner local receiver status
LPA_1000REMRXOK = 0x1000 # Link partner remote receiver status
LPA_1000FULL = 0x0800 # Link partner 1000BASE-T full duplex
LPA_1000HALF = 0x0400 # Link partner 1000BASE-T half duplex
# Flow control flags
FLOW_CTRL_TX = 0x01
FLOW_CTRL_RX = 0x02
# MMD Access Control register fields
MII_MMD_CTRL_DEVAD_MASK = 0x1f # Mask MMD DEVAD
MII_MMD_CTRL_ADDR = 0x0000 # Address
MII_MMD_CTRL_NOINCR = 0x4000 # no post increment
MII_MMD_CTRL_INCR_RDWT = 0x8000 # post increment on reads & writes
MII_MMD_CTRL_INCR_ON_WT = 0xC000 # post increment on writes only
# MII SOIC Commands
SIOCGMIIPHY = 0x8947 # Get address of MII PHY in use.
SIOCGMIIREG = 0x8948 # Read MII PHY register.
SIOCSMIIREG = 0x8949 # Write MII PHY register.
class mii_ioctl_data(ctypes.Structure):
_pack_ = 1
_fields_ = [
('phy_id', ctypes.c_uint16),
('reg_num', ctypes.c_uint16),
('val_in', ctypes.c_uint16),
('val_out', ctypes.c_uint16),
]
class ifreq(ctypes.Structure):
_pack_ = 1
_fields_ = [('ifr_name', (ctypes.c_ubyte * 16)), ('data', mii_ioctl_data)]
def mdio_read_reg(ifname, reg):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        ifr = ifreq()
        ifr.ifr_name = (ctypes.c_ubyte * 16)(*bytearray(ifname.encode()))
        fcntl.ioctl(sock, SIOCGMIIPHY, ifr)  # fetch the PHY address in use
        ifr.data.reg_num = reg
        fcntl.ioctl(sock, SIOCGMIIREG, ifr)  # read the requested MII register
        return ifr.data.val_out
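# Example (interface name is illustrative): report link status by reading
# the basic mode status register of eth0's PHY.
#   if mdio_read_reg("eth0", MII_BMSR) & BMSR_LSTATUS:
#       print("link up")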
|
Python
|
CL
|
5c9914c8a2c51631ba16ba128eb9e740e21d961267aad371c64abc1ebd242951
|
#
# PySNMP MIB module FOUNDRY-SN-AGENT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/FOUNDRY-SN-AGENT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:40:54 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion")
snStack, snAgentSys, snChassis = mibBuilder.importSymbols("FOUNDRY-SN-ROOT-MIB", "snStack", "snAgentSys", "snChassis")
CounterBasedGauge64, = mibBuilder.importSymbols("HCNUM-TC", "CounterBasedGauge64")
InterfaceIndex, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndex")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
EnabledStatus, = mibBuilder.importSymbols("P-BRIDGE-MIB", "EnabledStatus")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter64, NotificationType, iso, TimeTicks, ObjectIdentity, Integer32, MibIdentifier, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Counter32, Unsigned32, Bits, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "NotificationType", "iso", "TimeTicks", "ObjectIdentity", "Integer32", "MibIdentifier", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Counter32", "Unsigned32", "Bits", "ModuleIdentity")
TruthValue, TextualConvention, TimeStamp, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "TimeStamp", "DisplayString", "RowStatus")
snAgent = ModuleIdentity((1, 3, 6, 1, 4, 1, 1991, 4))
snAgent.setRevisions(('2014-03-10 00:00', '2013-02-28 00:00', '2012-09-21 00:00', '2012-04-18 00:00', '2012-01-24 00:00', '2011-12-22 00:00', '2010-06-02 00:00', '2009-09-30 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: snAgent.setRevisionsDescriptions(('Deprecated brcdSwIntfModAutoUpgradeAllImages OID.', 'Added snAgentBrdUpTime object.', 'Added snAgGblPasswordCheckMode object.', 'Added downloadSPMonitor enum to snAgImgLoad object.', 'Added snAgentCpuProcessEnable object.', 'Major Changes in NI 5.3: Added MIBs for Simplified Upgrade, Fixes for status for 100G module in blocked slot, SNMP support for MR2 module Description Updates.', 'Changed the ORGANIZATION, CONTACT-INFO and DESCRIPTION fields.', 'convert from SMIv1 to SMIv2',))
if mibBuilder.loadTexts: snAgent.setLastUpdated('201209210000Z')
if mibBuilder.loadTexts: snAgent.setOrganization('Brocade Communications Systems, Inc.')
if mibBuilder.loadTexts: snAgent.setContactInfo('Technical Support Center 130 Holger Way, San Jose, CA 95134 Email: ipsupport@brocade.com Phone: 1-800-752-8061 URL: www.brocade.com')
if mibBuilder.loadTexts: snAgent.setDescription("Copyright 1996-2010 Brocade Communications Systems, Inc. All rights reserved. This Brocade Communications Systems SNMP Management Information Base Specification embodies Brocade Communications Systems' confidential and proprietary intellectual property. Brocade Communications Systems retains all title and ownership in the Specification, including any revisions. This Specification is supplied AS IS, and Brocade Communications Systems makes no warranty, either express or implied, as to the use, operation, condition, or performance of the specification, and any unintended consequence it may on the user environment.")
class MacAddress(TextualConvention, OctetString):
description = ' This data type is used to model media addresses. For many types of media, this will be in a binary representation. For example, an ethernet address would be represented as a string of 6 octets. '
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(6, 6)
fixedLength = 6
class DisplayString(TextualConvention, OctetString):
description = 'This data type is used to model textual information taken from the NVT ASCII character set. By convention, objects with this syntax are declared as having SIZE (0..255)'
status = 'current'
class BrcdImageType(TextualConvention, Integer32):
description = 'Image types supported by XMR/MLX and CES/CER'
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28))
namedValues = NamedValues(("unknown", 1), ("managementModuleBoot", 2), ("managementModuleMonitor", 3), ("managementModuleApplication", 4), ("interfaceModuleBoot", 5), ("interfaceModuleMonitor", 6), ("interfaceModuleApplication", 7), ("mgmtAndIntfModuleCombinedApp", 8), ("fpgaMBridge", 9), ("fpgaMBridge32", 10), ("fpgaSBridge", 11), ("fpgaHBridge", 12), ("fpgaBundled", 13), ("fpgaPbifOc", 14), ("fpgaStatsOc", 15), ("fpgaXppOc", 16), ("fpgaPbifMrj", 17), ("fpgaStatsMrj", 18), ("fpgaXppMrj", 19), ("fpgaPbifSp2", 20), ("fpgaXgmacSp2", 21), ("fpgaXppSp2", 22), ("fpgaPbif8x10", 23), ("fpgaXpp8x10", 24), ("fpgaXpp2x100", 25), ("fpgaPbifMetro", 26), ("fpgaXpp4x40", 27), ("fpgaPbif4x40", 28))
snChasGen = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1))
snChasPwr = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2))
snChasFan = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3))
snChasUnit = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4))
snChasType = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasType.setStatus('current')
if mibBuilder.loadTexts: snChasType.setDescription('The chassis type represents the type of Foundry product being managed. ')
snChasSerNum = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasSerNum.setStatus('current')
if mibBuilder.loadTexts: snChasSerNum.setDescription('The serial number of the chassis. If the serial number is unknown or unavailable then the value should be a zero length string.')
snChasPwrSupplyStatus = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasPwrSupplyStatus.setStatus('deprecated')
if mibBuilder.loadTexts: snChasPwrSupplyStatus.setDescription('This object is being deprecated, please use snChasPwrSupplyTable instead. A bit array that contains the value of the Chassis Power Supplies. This is a packed bit string; the 2 power supplies status are encoded into 4 bits (a nibble). There are multiple power supplies per chassis in this release. The following shows the meaning of each bit: (bit 0 is the least significant bit). bit position meaning ------------ ------- 20-31 reserved 19 Power Supply 8 DC (0=bad, 1=good). 18 Power Supply 7 DC (0=bad, 1=good). 17 Power Supply 8 present status (0=present, 1=not-present). 16 Power Supply 7 present status (0=present, 1=not-present). 15 Power Supply 6 DC (0=bad, 1=good). 14 Power Supply 5 DC (0=bad, 1=good). 13 Power Supply 6 present status (0=present, 1=not-present). 12 Power Supply 5 present status (0=present, 1=not-present). 11 Power Supply 4 DC (0=bad, 1=good). 10 Power Supply 3 DC (0=bad, 1=good). 9 Power Supply 4 present status (0=present, 1=not-present). 8 Power Supply 3 present status (0=present, 1=not-present). 4-7 reserved 3 Power Supply 2 DC (0=bad, 1=good). 2 Power Supply 1 DC (0=bad, 1=good). 1 Power Supply 2 present status (0=present, 1=not-present). 0 Power Supply 1 present status (0=present, 1=not-present).')
snChasFanStatus = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFanStatus.setStatus('deprecated')
if mibBuilder.loadTexts: snChasFanStatus.setDescription('This object is being deprecated, please use snChasFanTable instead. A bit array that contains the value of the fan status. This is a packed bit string. The status of each fan is encoded into one bit. bit value meaning --------- ------- 0 fan failure. 1 fan good. There are two fans per VLAN Switch chassis in this release. The following shows the meaning of each bit: (bit 0 is the least significant bit). bit position meaning ------------ ------- 6-31 reserved 5 fan6 status 4 fan5 status 3 fan4 status 2 fan3 status 1 fan2 status 0 fan1 status')
snChasMainBrdDescription = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasMainBrdDescription.setStatus('current')
if mibBuilder.loadTexts: snChasMainBrdDescription.setDescription('The main board description string. (It is obsoleted for Chassis Product)')
snChasMainPortTotal = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 24))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasMainPortTotal.setStatus('current')
if mibBuilder.loadTexts: snChasMainPortTotal.setDescription('The total number of ports for the main board. (It is obsoleted for Chassis Product)')
snChasExpBrdDescription = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasExpBrdDescription.setStatus('current')
if mibBuilder.loadTexts: snChasExpBrdDescription.setDescription('The expansion board description string. Expansion board are those boards attaching on the main board. (It is obsoleted for Chassis Product)')
snChasExpPortTotal = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 24))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasExpPortTotal.setStatus('current')
if mibBuilder.loadTexts: snChasExpPortTotal.setDescription('The total number of ports for the expansion board. (It is obsoleted for Chassis Product)')
snChasStatusLeds = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasStatusLeds.setStatus('current')
if mibBuilder.loadTexts: snChasStatusLeds.setDescription('A bit array that contains the value of the front panel status LEDs. This is a bit-map; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24 that means 24 Ports Status LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (Link off) 1 on (Link on) The bitmap of LEDs are as following: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 --------')
snChasTrafficLeds = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasTrafficLeds.setStatus('current')
if mibBuilder.loadTexts: snChasTrafficLeds.setDescription('A bit array that contains the value of the front panel traffic LEDs. This is a packed bit string; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24 that means 24 Ports Traffic LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (no traffic) 1 on (traffic) The bitmap of LEDs are as following: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 --------')
snChasMediaLeds = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasMediaLeds.setStatus('current')
if mibBuilder.loadTexts: snChasMediaLeds.setDescription('A bit array that contains the value of the front panel media LEDs. This is a packed bit string; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24 that means 24 Ports Media LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning --------- ------- 0 half duplex 1 full duplex The bitmap of LEDs are as following: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 ---------')
snChasEnablePwrSupplyTrap = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snChasEnablePwrSupplyTrap.setStatus('current')
if mibBuilder.loadTexts: snChasEnablePwrSupplyTrap.setDescription('Indicates whether the SNMP agent process is permitted to generate power supply failure traps.')
snChasMainBrdId = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 13), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasMainBrdId.setStatus('current')
if mibBuilder.loadTexts: snChasMainBrdId.setDescription('The main board identifier, which can uniquely identify a board type. It is an encoded octet string with the following meaning: octet 0 - octet string format version, which identifies the format of this string. If format version octet has the value 1, the octets after the version octet have the following meaning: octet 1 - product type, FIWG=0x57, FIBB=0x42, FIMLS=0x4D NI=0x4E, TI=0x54, TIRT=0x52 octet 2 - board type, POWERPC=1, ALPHA=2 The length of the octet string is 27. If format version octet has the value 2, the octets after the version octet have the following meaning: octet 1 - product type: BI_WG 0x57 BI_BB 0x42 BI_NI 0x4E NI_M4 0x4D BI_SLB 0x53 octet 2 - module type: MASTER_FIBER_8G 0x0 MASTER_FIBER_4G 0x1 MASTER_COPPER_16 0x2 SLAVE_FIBER_4G 0x3 FI_MASTER_FIBER_2G 0x4 FI_MASTER_FIBER_4G 0x5 MASTER_COPPER_8G 0x6 FI_MASTER_FIBER_8G 0x7 SLAVE_FIBER_8G 0x8 MASTER_COPPER_12_2 0x9 SLAVE_COPPER_24 0xA FI_SLAVE_COPPER_24 0xB SLAVE_100FX_8 0xD SLAVE_100FX_16 0xC SLAVE_COPPER_8G 0xE SLAVE_COPPER_16_2 0xF STACK_FIBER_8G 0x10 STACK_COPPER_8G 0x11 MASTER_FIBER_2G 0x12 SLAVE_100FX_24 0x13 MASTER_FIBER_0G 0x14 POS_622M 0x15 POS_155M 0x16 SLAVE_FIBER_2G 0x17 SLAVE_COPPER_2G 0x18 FI_SLAVE_FIBER_2G 0x19 FI_SLAVE_FIBER_4G 0x1A FI_SLAVE_FIBER_8G 0x1B FI_SLAVE_COPPER_8G 0x1C FI_MASTER_COPPER_8G 0x1D POS_155M2P 0x1E FI_MASTER_COPPER_4G 0x1F FI_MASTER_COPPER_2G 0x20 MASTER_COPPER_4G 0x21 MASTER_COPPER_2G 0x22 MASTER_M4_8G 0x23 MASTER_M4_4G 0x24 MASTER_M4_2G 0x25 MASTER_M4_0G 0x26 MASTER_M5_0G 0x27 POS_2488M 0x28 SLAVE_M5_0G 0x29 POS_N2488M 0x2A STACK_IPC_48_2 0x2B SLAVE_NPA_FIBER_4G 0x2C ATM_2PORT 0x2D ATM_4PORT 0x2E SLAVE_FIBER_10G 0x2F STACK_FES_48_2 0x30 STACK_FES_24_2 0x31 STACK_FES_96_4 0x32 STACK_FES_12G 0x33 STACK_FESX_24G 0x34 STACK_FESX_24_2_G 0x35 STACK_FESX_24_1_G 0x36 STACK_FESX_48G 0x37 STACK_FESX_48_2_G 0x38 STACK_FESX_48_1_G 0x39 SUPERX_FI_MGMT 0x40 SUPERX_FI_2P10G 0x41 SUPERX_FI_24GC 0x42 SUPERX_FI_24GF 0x43 SUPERX_FI_2P10G_WAN 0x44 SUPERX_FI_MGMT_II 0x4a SLAVE_JC_48E 0xC3 SLAVE_JC_48T 0xC4 MASTER_JC_M4_8G 0xC5 SLAVE_JC_8G 0xC6 SLAVE_JC_B16GF 0xC8 MASTER_JC_B2404 0xC9 SLAVE_JC_B16GC 0xCA The length of the octet string is 28. Both format version 1 and 2: octet 3 - processor type, PVR_M603=3, PVR_M604=4, PVR_M603E=6, PVR_M603EV=7, PVR_M750=8, PVR_M604E=9, PVR_M8245=81 octet 4 to octet 5 - processor speed in MHz octet 6 - MAC type: MAC_NONE=0 MAC_SEEQ_10_100=1, MAC_DEC_10_100=2, MAC_3COM_10_100=3, MAC_X10GMAC_10000=4, MAC_SEEQ_1000=5, MAC_GMAC_1000=6, MAC_VLSI_1000=7 octet 7 - PHY type, PHY_NONE=0, PHY_QSI=1, PHY_BROADCOM=2, PHY_ICS=3, PHY_NATIONAL=4, PHY_LEVEL1=6, PHY_BROADCOM_10_100=7, PHY_LEVEL24=8, PHY_BROADCOM_10000=9 (for 10G), PHY_3COM_10_100=9 (for others) octet 8 - port type, COPPER=0, FIBER=1 octet 9 - fiber port type, NONFIBER=0, SX_FIBER=1, LX_FIBER=2, LHX_FIBER=3, LX_SX_FIBER=4, LHB_FIBER=5 octet 10 to octet 13 - DRAM size in KBytes octet 14 to octet 17 - boot flash size in KBytes octet 18 to octet 21 - code flash size in KBytes octet 22 to octet 27 - serial number. Format version 1 only: octet 28 - chassis backplane type. chassis4000 = 0x00 chassis8000 = 0x02 chassis15000 = 0x01 chassisFISX = 0x04 Turbo8 = 0x07 (stack2) FastIron2 = 0x06 (stack1)')
snChasExpBrdId = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 14), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasExpBrdId.setStatus('current')
if mibBuilder.loadTexts: snChasExpBrdId.setDescription('The expansion board identifier. Expansion board are those boards attaching on the main board. It is an encoded octet string with the following meaning: octet 0 - octet string format version, which identifies the format of this string. If format version octet has the value 1, the octets after the version octet have the following meaning: octet 1 - expansion board type, HUNDRED_MEG_1PORT=1, HUNDRED_MEG_2PORT=2, HUNDRED_MEG_1PORT_COPPER=3, HUNDRED_MEG_2PORT_COPPER=4, HUNDRED_MEG_2PORT_LX=5, GIGA_1PORT=8, GIGA_2PORT=9 octet 2 - fiber port type, NONFIBER=0, SX_FIBER=1, LX_FIBER=2, LHX_FIBER=3, LX_SX_FIBER=4 (It is obsoleted for Chassis Product)')
snChasSpeedLeds = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasSpeedLeds.setStatus('current')
if mibBuilder.loadTexts: snChasSpeedLeds.setDescription('A bit array that contains the value of the front panel media LEDs. This is a packed bit string; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24 that means 24 Ports Speed LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning --------- ------- 0 10 MBit 1 100 MBit The bitmap of LEDs are as following: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 ---------')
snChasEnableFanTrap = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snChasEnableFanTrap.setStatus('current')
if mibBuilder.loadTexts: snChasEnableFanTrap.setDescription('Indicates whether the SNMP agent process is permitted to generate fan failure traps.')
snChasIdNumber = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 17), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snChasIdNumber.setStatus('current')
if mibBuilder.loadTexts: snChasIdNumber.setDescription('An administratively-assigned chassis identity number, used by inventory control.')
snChasActualTemperature = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 18), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-110, 250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasActualTemperature.setStatus('current')
if mibBuilder.loadTexts: snChasActualTemperature.setDescription('Temperature of the chassis. Each unit is 0.5 degrees Celsius. Only management modules built with temperature sensor hardware are applicable; non-applicable management modules return no-such-name.')
snChasWarningTemperature = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 19), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 250))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snChasWarningTemperature.setStatus('current')
if mibBuilder.loadTexts: snChasWarningTemperature.setDescription('Actual temperature higher than this threshold value will trigger the switch to send a temperature warning trap. Each unit is 0.5 degrees Celsius. Only management modules built with temperature sensor hardware are applicable; non-applicable management modules return no-such-name.')
snChasShutdownTemperature = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 20), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 250))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snChasShutdownTemperature.setStatus('current')
if mibBuilder.loadTexts: snChasShutdownTemperature.setDescription('Actual temperature higher than this threshold value will shut down part of the switch hardware to cool down the system. Each unit is 0.5 degrees Celsius. Only management modules built with temperature sensor hardware are applicable; non-applicable management modules return no-such-name.')
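# --- Editor's note (illustrative sketch): the three temperature objects above are
# expressed in 0.5-degree-Celsius units. The helper below fetches
# snChasActualTemperature with pysnmp's high-level API and converts it; the host and
# community are placeholders, and this compiled MIB must be on the pysnmp MIB search
# path for name resolution to work.
def _fetch_chassis_temperature_celsius(host='demo-host', community='public'):
    """Read snChasActualTemperature and return degrees Celsius (sketch)."""
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    error_indication, error_status, _, var_binds = next(getCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB',
                                  'snChasActualTemperature', 0))))
    if error_indication or error_status:
        raise RuntimeError(error_indication or error_status.prettyPrint())
    return int(var_binds[0][1]) * 0.5   # each unit is 0.5 degrees Celsius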
snChasEnableTempWarnTrap = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snChasEnableTempWarnTrap.setStatus('current')
if mibBuilder.loadTexts: snChasEnableTempWarnTrap.setDescription('Indicates whether the SNMP agent process is permitted to generate temperature warning traps.')
snChasFlashCard = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 22), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFlashCard.setStatus('current')
if mibBuilder.loadTexts: snChasFlashCard.setDescription('A bit array that contains the value of the flash card status. This is a packed bit string. The status of each flash card is encoded into one bit. bit value meaning --------- ------- 0 flash card absent. 1 flash card present. There are up to two flash cards in this release. This object is valid if M4 management module is present. The following shows the meaning of each bit: (bit 0 is the least significant bit). bit position meaning ------------ ------- 2-31 reserved 1 flash card 2 status 0 flash card 1 status')
snChasFlashCardLeds = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 23), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFlashCardLeds.setStatus('current')
if mibBuilder.loadTexts: snChasFlashCardLeds.setDescription('A bit array that contains the value of the flash card LEDs. This is a packed bit string; each LED is encoded into 1 bit for each flash card. bit value meaning -------- --------- 0 off 1 on ')
snChasNumSlots = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 24), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasNumSlots.setStatus('current')
if mibBuilder.loadTexts: snChasNumSlots.setDescription('Number of slots of the chassis.')
snChasArchitectureType = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("stackable", 1), ("bigIron", 2), ("terathon", 3), ("fifthGen", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasArchitectureType.setStatus('current')
if mibBuilder.loadTexts: snChasArchitectureType.setDescription('Architecture type.')
snChasProductType = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 50, 66, 77, 78, 83, 87))).clone(namedValues=NamedValues(("invalid", 0), ("mg8", 1), ("ni40G", 2), ("imr", 3), ("biRx800", 4), ("niXmr16000", 5), ("biRx400", 6), ("niXmr8000", 7), ("biRx200", 8), ("niXmr4000", 9), ("niMlx16", 10), ("niMlx8", 11), ("niMlx4", 12), ("niMlx32", 13), ("niXmr32000", 14), ("biRx32", 15), ("niCES2000Series", 16), ("niCER2000Series", 17), ("brMlxE4", 18), ("brMlxE8", 19), ("brMlxE16", 20), ("brMlxE32", 21), ("biNI2", 50), ("biBB", 66), ("biM4", 77), ("biNI", 78), ("biSLB", 83), ("biWG", 87)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasProductType.setStatus('current')
if mibBuilder.loadTexts: snChasProductType.setDescription('Product type.')
snChasSystemMode = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("xmr", 1), ("mlx", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasSystemMode.setStatus('current')
if mibBuilder.loadTexts: snChasSystemMode.setDescription('System Mode. This object is only applicable to XMR/MLX/MLXe products. For example, if snChasProductType is niMlx4/niMlx8/niMlx16/niMlx32 then this object returns mlx(2). If snChasProductType is niXmr4000/niXmr8000/niXmr16000/niXmr32000 this object returns xmr(1). If snChasProductType is brMlxE4/brMlxE8/brMlxE16/brMlxE32 this object returns either xmr(1) or mlx(2) depending on the mode of the system. This object is not supported for the other products.')
snChasFactoryPartNumber = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 28), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFactoryPartNumber.setStatus('current')
if mibBuilder.loadTexts: snChasFactoryPartNumber.setDescription('Factory Part number assigned by the manufacturer.')
snChasFactorySerialNumber = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 1, 29), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFactorySerialNumber.setStatus('current')
if mibBuilder.loadTexts: snChasFactorySerialNumber.setDescription('Factory serial number assigned by the manufacturer.')
snChasPwrSupplyTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 1), )
if mibBuilder.loadTexts: snChasPwrSupplyTable.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupplyTable.setDescription('A table of information for each power supply. Only installed power supplies appear as table rows.')
snChasPwrSupplyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snChasPwrSupplyIndex"))
if mibBuilder.loadTexts: snChasPwrSupplyEntry.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupplyEntry.setDescription('A row in the power supply table.')
snChasPwrSupplyIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasPwrSupplyIndex.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupplyIndex.setDescription('The index to power supply table.')
snChasPwrSupplyDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasPwrSupplyDescription.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupplyDescription.setDescription('The power supply description string.')
snChasPwrSupplyOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("normal", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasPwrSupplyOperStatus.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupplyOperStatus.setDescription('The power supply operation status.')
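# --- Editor's note (illustrative sketch): walking the power supply table above with
# pysnmp's nextCmd; host and community are placeholders, and lexicographicMode=False
# stops the walk at the end of the requested columns.
def _walk_power_supplies(host='demo-host', community='public'):
    """Yield (description, oper_status) per installed power supply (sketch)."""
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for error_indication, error_status, _, var_binds in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((host, 161)), ContextData(),
            ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB',
                                      'snChasPwrSupplyDescription')),
            ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB',
                                      'snChasPwrSupplyOperStatus')),
            lexicographicMode=False):
        if error_indication or error_status:
            break
        description, status = var_binds[0][1], var_binds[1][1]
        yield str(description), int(status)   # 1=other, 2=normal, 3=failure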
snChasPwrSupply2Table = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 2), )
if mibBuilder.loadTexts: snChasPwrSupply2Table.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupply2Table.setDescription('A table of information for each power supply on each unit. Only installed power supplies appear as table rows.')
snChasPwrSupply2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snChasPwrSupply2Unit"), (0, "FOUNDRY-SN-AGENT-MIB", "snChasPwrSupply2Index"))
if mibBuilder.loadTexts: snChasPwrSupply2Entry.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupply2Entry.setDescription('A row in the power supply table.')
snChasPwrSupply2Unit = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasPwrSupply2Unit.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupply2Unit.setDescription('The unit index of the power supply table.')
snChasPwrSupply2Index = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasPwrSupply2Index.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupply2Index.setDescription('The index to power supply table.')
snChasPwrSupply2Description = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasPwrSupply2Description.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupply2Description.setDescription('The power supply description string.')
snChasPwrSupply2OperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("normal", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasPwrSupply2OperStatus.setStatus('current')
if mibBuilder.loadTexts: snChasPwrSupply2OperStatus.setDescription('The power supply operation status.')
snChasFanTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 1), )
if mibBuilder.loadTexts: snChasFanTable.setStatus('current')
if mibBuilder.loadTexts: snChasFanTable.setDescription('A table of information for each fan. Only installed fans appear as table rows.')
snChasFanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snChasFanIndex"))
if mibBuilder.loadTexts: snChasFanEntry.setStatus('current')
if mibBuilder.loadTexts: snChasFanEntry.setDescription('A row in the fan table.')
snChasFanIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFanIndex.setStatus('current')
if mibBuilder.loadTexts: snChasFanIndex.setDescription('The index to fan table.')
snChasFanDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFanDescription.setStatus('current')
if mibBuilder.loadTexts: snChasFanDescription.setDescription('The fan description string.')
snChasFanOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("normal", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFanOperStatus.setStatus('current')
if mibBuilder.loadTexts: snChasFanOperStatus.setDescription('The fan operation status.')
snChasFan2Table = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 2), )
if mibBuilder.loadTexts: snChasFan2Table.setStatus('current')
if mibBuilder.loadTexts: snChasFan2Table.setDescription('A table of information for each fan on each unit. Only installed fans appear as table rows.')
snChasFan2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snChasFan2Unit"), (0, "FOUNDRY-SN-AGENT-MIB", "snChasFan2Index"))
if mibBuilder.loadTexts: snChasFan2Entry.setStatus('current')
if mibBuilder.loadTexts: snChasFan2Entry.setDescription('A row in the fan table.')
snChasFan2Unit = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFan2Unit.setStatus('current')
if mibBuilder.loadTexts: snChasFan2Unit.setDescription('The unit index of the fan table.')
snChasFan2Index = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFan2Index.setStatus('current')
if mibBuilder.loadTexts: snChasFan2Index.setDescription('The index to fan table.')
snChasFan2Description = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFan2Description.setStatus('current')
if mibBuilder.loadTexts: snChasFan2Description.setDescription('The fan description string.')
snChasFan2OperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("normal", 2), ("failure", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasFan2OperStatus.setStatus('current')
if mibBuilder.loadTexts: snChasFan2OperStatus.setDescription('The fan operation status.')
snChasUnitTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4, 1), )
if mibBuilder.loadTexts: snChasUnitTable.setStatus('current')
if mibBuilder.loadTexts: snChasUnitTable.setDescription('A table of chassis information for each unit. Only active chassis units appear as table rows.')
snChasUnitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snChasUnitIndex"))
if mibBuilder.loadTexts: snChasUnitEntry.setStatus('current')
if mibBuilder.loadTexts: snChasUnitEntry.setDescription('A row in the chassis table.')
snChasUnitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasUnitIndex.setStatus('current')
if mibBuilder.loadTexts: snChasUnitIndex.setDescription('The index to chassis table.')
snChasUnitSerNum = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasUnitSerNum.setStatus('current')
if mibBuilder.loadTexts: snChasUnitSerNum.setDescription('The serial number of the chassis for each unit. If the serial number is unknown or unavailable then the value should be a zero length string.')
snChasUnitNumSlots = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasUnitNumSlots.setStatus('current')
if mibBuilder.loadTexts: snChasUnitNumSlots.setDescription('Number of slots of the chassis for each unit.')
snChasUnitActualTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-110, 250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasUnitActualTemperature.setStatus('current')
if mibBuilder.loadTexts: snChasUnitActualTemperature.setDescription('Temperature of the chassis. Each unit is 0.5 degrees Celsius. Only management modules built with temperature sensor hardware are applicable; non-applicable management modules return no-such-name.')
snChasUnitWarningTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasUnitWarningTemperature.setStatus('current')
if mibBuilder.loadTexts: snChasUnitWarningTemperature.setDescription('Actual temperature higher than this threshold value will trigger the switch to send a temperature warning trap. Each unit is 0.5 degrees Celsius. Only management modules built with temperature sensor hardware are applicable; non-applicable management modules return no-such-name.')
snChasUnitShutdownTemperature = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasUnitShutdownTemperature.setStatus('current')
if mibBuilder.loadTexts: snChasUnitShutdownTemperature.setDescription('Actual temperature higher than this threshold value will shut down part of the switch hardware to cool down the system. Each unit is 0.5 degrees Celsius. Only management modules built with temperature sensor hardware are applicable; non-applicable management modules return no-such-name.')
snChasUnitPartNum = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 1, 4, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snChasUnitPartNum.setStatus('current')
if mibBuilder.loadTexts: snChasUnitPartNum.setDescription('The part number of the chassis for each unit. If the part number is unknown or unavailable then the value should be a zero length string.')
snAgentGbl = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1))
snAgentBrd = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2))
snAgentTrp = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3))
snAgentBoot = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4))
snAgCfgEos = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 5))
snAgentLog = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6))
snAgentSysParaConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 7))
snAgentConfigModule = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8))
snAgentUser = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9))
snAgentRedundant = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 10))
snAgentCpu = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11))
snAgentHw = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12))
snAgentTemp = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13))
snAgentPoe = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 14))
snAgentLicense = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15))
brcdSw = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16))
snAgentTask = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17))
brcdSwPackageGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1))
brcdSwPackageUpgrade = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1))
brcdSwIntfModAutoUpgrade = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 2))
snAgReload = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("running", 2), ("reset", 3), ("busy", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgReload.setStatus('current')
if mibBuilder.loadTexts: snAgReload.setDescription('Action object to reboot the agent. The following values can only be read: other(1).....agent in unknown or other state running(2)...agent running busy(4).......reload not allowed at this time, flash is busy The following value can be written: reset(3).....do a hard reset The agent will return a response before the action occurs.')
snAgEraseNVRAM = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normal", 1), ("error", 2), ("erase", 3), ("erasing", 4), ("busy", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgEraseNVRAM.setStatus('current')
if mibBuilder.loadTexts: snAgEraseNVRAM.setDescription('Action object to erase the NVRAM of the agent. The following values can only be read: normal(1) error(2)... operation failed or bad flash erasing(4)...agent is erasing NVRAM flash busy(5).......operation not allowed at this time, flash is busy The following value can be written: erase(3).....do erase The agent returns a response even before the erase is done; the read value stays erasing until the erase completes, and further erase requests are rejected until the value returns to error or normal.')
snAgWriteNVRAM = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normal", 1), ("error", 2), ("write", 3), ("writing", 4), ("busy", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgWriteNVRAM.setStatus('current')
if mibBuilder.loadTexts: snAgWriteNVRAM.setDescription('Action object to save all configuration info to the NVRAM of the agent. The following values can only be read: normal(1) error(2)... operation failed or bad flash writing(4)...agent is writing NVRAM flash busy(5).......operation not allowed at this time, flash is busy The following value can be written: write(3).....do write The agent returns a response even before the write is done; the read value stays writing until the write completes, and further write requests are rejected until the value returns to error or normal.')
snAgConfigFromNVRAM = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("normal", 1), ("error", 2), ("config", 3), ("configing", 4), ("busy", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgConfigFromNVRAM.setStatus('current')
if mibBuilder.loadTexts: snAgConfigFromNVRAM.setDescription('This object is no longer supported by FastIron and NetIron agents and should not be used. Action object to configure the switch from the NVRAM of the agent. The following values can only be read: normal(1) error(2)... operation failed or bad flash configing(4)...configing from NVRAM flash is in process. busy(5).......operation not allowed at this time, flash is busy The following value can be written: config(3).....do config The agent will return a response after config is done.')
snAgTftpServerIp = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTftpServerIp.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTftpServerIp.setDescription('The tftp server address, this will be used for both download/upload image file and config file.')
snAgImgFname = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgImgFname.setStatus('current')
if mibBuilder.loadTexts: snAgImgFname.setDescription('Name of the image file including path currently associated with the system. When the object is not used, the value is a zero length string.')
snAgImgLoad = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33))).clone(namedValues=NamedValues(("normal", 1), ("flashPrepareReadFailure", 2), ("flashReadError", 3), ("flashPrepareWriteFailure", 4), ("flashWriteError", 5), ("tftpTimeoutError", 6), ("tftpOutOfBufferSpace", 7), ("tftpBusy", 8), ("tftpRemoteOtherErrors", 9), ("tftpRemoteNoFile", 10), ("tftpRemoteBadAccess", 11), ("tftpRemoteDiskFull", 12), ("tftpRemoteBadOperation", 13), ("tftpRemoteBadId", 14), ("tftpRemoteFileExists", 15), ("tftpRemoteNoUser", 16), ("operationError", 17), ("loading", 18), ("uploadMPPrimary", 19), ("downloadMPPrimary", 20), ("uploadMPSecondary", 21), ("downloadMPSecondary", 22), ("tftpWrongFileType", 23), ("downloadSPPrimary", 24), ("downloadSPSecondary", 25), ("uploadMPBootROM", 26), ("downloadMPBootROM", 27), ("uploadMPBootTFTP", 28), ("downloadMPBootTFTP", 29), ("uploadMPMonitor", 30), ("downloadMPMonitor", 31), ("downloadSPBootROM", 32), ("downloadSPMonitor", 33)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgImgLoad.setStatus('current')
if mibBuilder.loadTexts: snAgImgLoad.setDescription('Action object to download/upload a new image to the agent. The following values can be read: error values from normal(1) to operationError(17) and tftpWrongFileType(23); loading(18)..operation is in process. The following values are used for set: uploadMPPrimary(19)..upload the Primary image from MP flash to tftp server. downloadMPPrimary(20)..download the Primary image from tftp server to MP flash. uploadMPSecondary(21)..upload the Secondary image from MP flash to tftp server. downloadMPSecondary(22)..download the Secondary image from tftp server to MP flash. downloadSPPrimary(24)..download the Primary image from tftp server to SP flash. downloadSPSecondary(25)..download the Secondary image from tftp server to SP flash. uploadMPBootROM(26)..upload the Boot from flash image from MP flash to tftp server. downloadMPBootROM(27)..download the Boot from flash image from tftp server to MP flash. uploadMPBootTFTP(28)..upload the Boot from TFTP image from MP flash to tftp server. downloadMPBootTFTP(29)..download the Boot from TFTP image from tftp server to MP flash. uploadMPMonitor(30)..upload the Monitor image from MP flash to tftp server. downloadMPMonitor(31)..download the Monitor image from tftp server to MP flash. downloadSPBootROM(32)..download the Boot image from tftp server to SP flash. downloadSPMonitor(33)..download the Monitor image from tftp server to SP flash. MP is the management processor, SP is the switch processor. The image filename is defined in snAgImgFname. The tftp server address is defined in snAgTftpServerIp. An atomic set of snAgImgLoad, snAgImgFname and snAgTftpServerIp is required for the download/upload to happen, and write requests are rejected during loading until the value returns to error or normal.')
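# --- Editor's note (illustrative sketch): per the DESCRIPTION above, snAgImgLoad,
# snAgImgFname and snAgTftpServerIp must be set atomically in one SET PDU to start a
# transfer. All parameter values below are placeholders; depending on the device's
# password-check mode, snAgGblPassword may also have to ride in the same PDU.
def _start_primary_image_download(host='demo-host', community='private',
                                  tftp_server='192.0.2.1', fname='image.bin'):
    """Trigger downloadMPPrimary(20) with one atomic SET (sketch)."""
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd,
                              IpAddress, OctetString, Integer32)
    error_indication, error_status, _, _ = next(setCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'snAgTftpServerIp', 0),
                   IpAddress(tftp_server)),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'snAgImgFname', 0),
                   OctetString(fname)),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'snAgImgLoad', 0),
                   Integer32(20))))
    if error_indication or error_status:
        raise RuntimeError(error_indication or error_status.prettyPrint())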
snAgCfgFname = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgCfgFname.setStatus('current')
if mibBuilder.loadTexts: snAgCfgFname.setDescription('Name of the config file(s) including path currently associated with the system. Some agents in special situations may support a value which contains multiple file names instead of a single file name. Multiple names are specified as a list of file names separated by semicolons (;). When the object is not used, the value is a zero length string.')
snAgCfgLoad = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30))).clone(namedValues=NamedValues(("normal", 1), ("flashPrepareReadFailure", 2), ("flashReadError", 3), ("flashPrepareWriteFailure", 4), ("flashWriteError", 5), ("tftpTimeoutError", 6), ("tftpOutOfBufferSpace", 7), ("tftpBusy", 8), ("tftpRemoteOtherErrors", 9), ("tftpRemoteNoFile", 10), ("tftpRemoteBadAccess", 11), ("tftpRemoteDiskFull", 12), ("tftpRemoteBadOperation", 13), ("tftpRemoteBadId", 14), ("tftpRemoteFileExists", 15), ("tftpRemoteNoUser", 16), ("operationError", 17), ("loading", 18), ("uploadFromFlashToServer", 20), ("downloadToFlashFromServer", 21), ("uploadFromDramToServer", 22), ("downloadToDramFromServer", 23), ("uploadFromFlashToNMS", 24), ("downloadToFlashFromNMS", 25), ("uploadFromDramToNMS", 26), ("downloadToDramFromNMS", 27), ("operationDoneWithNMS", 28), ("tftpWrongFileType", 29), ("downloadToDramFromServerOverwrite", 30)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgCfgLoad.setStatus('current')
if mibBuilder.loadTexts: snAgCfgLoad.setDescription('Action object to download/upload a config image to the agent. The following values can be read: error values from normal(1) to operationError(17) and tftpWrongFileType(29); loading(18)..operation is in process. The following values can be used to set: uploadFromFlashToServer(20)..upload the config from flash to tftp server. downloadToFlashFromServer(21)..download the config to flash from tftp server. uploadFromDramToServer(22)..upload the config from DRAM to tftp server. downloadToDramFromServer(23)..download the config to DRAM from tftp server. downloadToDramFromServerOverwrite(30)..download the config to DRAM from tftp server, clear the running-config, and overwrite with the new config. The following values can be used to set: uploadFromFlashToNMS(24)..upload the config from flash to NMS. downloadToFlashFromNMS(25)..download the config to flash from NMS. uploadFromDramToNMS(26)..upload the config from DRAM to NMS. downloadToDramFromNMS(27)..download the config to DRAM from NMS. The config filename is defined in snAgCfgFname. The tftp server address is defined in snAgTftpServerIp. An atomic set of snAgCfgLoad, snAgCfgFname and snAgTftpServerIp is required for the download/upload to happen, and write requests are rejected during loading until the value returns to error or normal. For the NMS commands (24) to (27), the snAgCfgEosTable MIB objects must be sent along in one PDU. A separate write memory CLI command or a set of snAgWriteNVRAM is required to save the config to NVRAM.')
snAgDefGwayIp = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 10), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgDefGwayIp.setStatus('current')
if mibBuilder.loadTexts: snAgDefGwayIp.setDescription('The default gateway (router) address.')
snAgImgVer = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 11), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgImgVer.setStatus('current')
if mibBuilder.loadTexts: snAgImgVer.setDescription("The version of the running software in the form 'major.minor.maintenance[letters]'.")
snAgFlashImgVer = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgFlashImgVer.setStatus('current')
if mibBuilder.loadTexts: snAgFlashImgVer.setDescription("The version of the software image saved in local storage such as flash memory in the form 'major.minor.maintenance[letters]'. If not known or not available, then the value is a zero length string.")
snAgGblIfIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 13), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblIfIpAddr.setStatus('current')
if mibBuilder.loadTexts: snAgGblIfIpAddr.setDescription('The interface ip address.')
snAgGblIfIpMask = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 14), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblIfIpMask.setStatus('current')
if mibBuilder.loadTexts: snAgGblIfIpMask.setDescription('The interface ip address mask.')
snAgGblPassword = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 15), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 97))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblPassword.setStatus('current')
if mibBuilder.loadTexts: snAgGblPassword.setDescription("System Security Access Password; this object may only be set, not read. An SNMP Get returns a null string. By default, this object needs to be in the same PDU when performing a Set operation on certain critical SNMP objects. If command 'no snmp-server pw-check' is configured on the device, then this object is not needed in the PDU. The value of this object depends on the authentication method configured for SNMP. If there's no AAA authentication configuration for SNMP, this object will have the enable superuser password. If the AAA authentication for SNMP is configured and has leading method as enable or line, this object will have the corresponding enable or line password. If the switch has AAA authentication for SNMP operation, and the method specified is one of local, TACACS+, or RADIUS, this object will have the format '<username> <password>' (note one space character in between). The maximum size allows concatenation of up to 48 octets of username and 48 octets of password, with one blank character in between.")
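# --- Editor's note (illustrative sketch): when password checking is enabled (see
# snAgGblPasswordCheckMode further below), action objects must be accompanied by
# snAgGblPassword in the same SET PDU. The sketch pairs it with snAgWriteNVRAM
# write(3); host, community and the password value are placeholders.
def _write_nvram_with_password(host='demo-host', community='private',
                               password='enable-password'):
    """Issue write(3) on snAgWriteNVRAM with the password varbind (sketch)."""
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, setCmd,
                              OctetString, Integer32)
    error_indication, error_status, _, _ = next(setCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'snAgGblPassword', 0),
                   OctetString(password)),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'snAgWriteNVRAM', 0),
                   Integer32(3))))
    if error_indication or error_status:
        raise RuntimeError(error_indication or error_status.prettyPrint())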
snAgTrpRcvrCurEntry = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 16), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgTrpRcvrCurEntry.setStatus('current')
if mibBuilder.loadTexts: snAgTrpRcvrCurEntry.setDescription('The number of entries currently created in the Trap Receiver Table.')
snAgGblDataRetrieveMode = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("nextbootCfg", 0), ("operationalData", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblDataRetrieveMode.setStatus('current')
if mibBuilder.loadTexts: snAgGblDataRetrieveMode.setDescription('By default, this mode is set to nextbootCfg(0). The VLAN Table and Port-STP Table data will be retrieved according to this mode. nextbootCfg(0).........the nextboot configuration data are retrieved operationalData(1)..........the current running data are retrieved')
snAgSystemLog = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 20), OctetString().subtype(subtypeSpec=ValueSizeConstraint(5, 5)).setFixedLength(5)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSystemLog.setStatus('current')
if mibBuilder.loadTexts: snAgSystemLog.setDescription("The system log object to indicate whether any Network Management Station (NMS) has logged in. The first byte of the octet string is the value described below, followed by 4 bytes of secret code. login(1)..... a value for an NMS to log in. heartbeat(2)... a value for the logged-in NMS to check in periodically; otherwise, the Agent will automatically set this object to logout(3) after a timeout period. logout(3).... a value for an NMS to log out. changePassword(4).... a value for the logged-in NMS to change the password, only if snAgGblPasswordChangeMode was configured to 'anyMgmtEntity'. changeReadOnlyCommunity(5).... a value for the logged-in NMS to change the read-only community string, only if snAgGblPasswordChangeMode was configured to 'anyMgmtEntity'. changeReadWriteCommunity(6).... a value for the logged-in NMS to change the read-write community string, only if snAgGblPasswordChangeMode was configured to 'anyMgmtEntity'.")
snAgGblEnableColdStartTrap = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblEnableColdStartTrap.setStatus('current')
if mibBuilder.loadTexts: snAgGblEnableColdStartTrap.setDescription('Indicates whether the SNMP agent process is permitted to generate cold start traps.')
snAgGblEnableLinkUpTrap = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 22), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblEnableLinkUpTrap.setStatus('current')
if mibBuilder.loadTexts: snAgGblEnableLinkUpTrap.setDescription('Indicates whether the SNMP agent process is permitted to generate link up traps.')
snAgGblEnableLinkDownTrap = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblEnableLinkDownTrap.setStatus('current')
if mibBuilder.loadTexts: snAgGblEnableLinkDownTrap.setDescription('Indicates whether the SNMP agent process is permitted to generate link down traps.')
snAgGblPasswordChangeMode = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 24), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("anyMgmtEntity", 1), ("consoleAndTelnet", 2), ("consoleOnly", 3), ("telnetOnly", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblPasswordChangeMode.setStatus('current')
if mibBuilder.loadTexts: snAgGblPasswordChangeMode.setDescription("Specify which management entity is allowed to change the password. 'anyMgmtEntity' management entities are SNMP management station, console command line interface, and telnet command line interface. For security reasons, this object can only be modified by the console command line interface or the telnet command line interface. The default value is 'consoleAndTelnet', which allows both the console and telnet command line interfaces to change the password.")
snAgGblReadOnlyCommunity = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 25), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblReadOnlyCommunity.setStatus('current')
if mibBuilder.loadTexts: snAgGblReadOnlyCommunity.setDescription("SNMP read community string; this object may only be set, not read. An SNMP Get returns a zero-length string. Set operation on this object is allowed only if command 'password-change any' is configured on the target device.")
snAgGblReadWriteCommunity = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 26), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblReadWriteCommunity.setStatus('current')
if mibBuilder.loadTexts: snAgGblReadWriteCommunity.setDescription("SNMP read-write community string; this object may only be set, not read. An SNMP Get returns a zero-length string. Set operation on this object is allowed only if command 'password-change any' is configured on the target device.")
snAgGblCurrentSecurityLevel = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 27), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 5))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblCurrentSecurityLevel.setStatus('current')
if mibBuilder.loadTexts: snAgGblCurrentSecurityLevel.setDescription('This represents the current log-in security level. Each level of security requires a password to permit the user to perform different system configurations.')
snAgGblSecurityLevelSet = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 28), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 5))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblSecurityLevelSet.setStatus('current')
if mibBuilder.loadTexts: snAgGblSecurityLevelSet.setDescription('This specifies which security level password is to be set.')
snAgGblLevelPasswordsMask = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblLevelPasswordsMask.setStatus('current')
if mibBuilder.loadTexts: snAgGblLevelPasswordsMask.setDescription('This shows the bitmap of level passwords which were successfully assigned to the system.')
snAgGblQueueOverflow = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblQueueOverflow.setStatus('current')
if mibBuilder.loadTexts: snAgGblQueueOverflow.setDescription('false - The device queues have not overflowed. true - The device queues have overflowed.')
snAgGblBufferShortage = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 31), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblBufferShortage.setStatus('current')
if mibBuilder.loadTexts: snAgGblBufferShortage.setDescription('false - The device buffers are adequate. true - The device buffers are in short supply.')
snAgGblDmaFailure = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblDmaFailure.setStatus('current')
if mibBuilder.loadTexts: snAgGblDmaFailure.setDescription('false - The device DMAs are in good condition. true - One of the DMAs in the device has failed.')
snAgGblResourceLowWarning = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 33), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblResourceLowWarning.setStatus('current')
if mibBuilder.loadTexts: snAgGblResourceLowWarning.setDescription('false - The device does not have a resource-low warning. true - The device has a resource-low warning.')
snAgGblExcessiveErrorWarning = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 34), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblExcessiveErrorWarning.setStatus('current')
if mibBuilder.loadTexts: snAgGblExcessiveErrorWarning.setDescription('false - The device does not have excessive collisions, FCS errors, alignment warnings, etc. true - The device does.')
snAgGblCpuUtilData = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 35), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblCpuUtilData.setStatus('current')
if mibBuilder.loadTexts: snAgGblCpuUtilData.setDescription('The statistics collection of utilization of the CPU in the device. On NetIron and FastIron platforms, reading this object resets all the counters; therefore, users need not set snAgGblCpuUtilCollect.')
snAgGblCpuUtilCollect = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 36), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblCpuUtilCollect.setStatus('deprecated')
if mibBuilder.loadTexts: snAgGblCpuUtilCollect.setDescription('enable(1) - Start statistics collection of CPU utilization in the device. disable(0) - Stop statistics collection of CPU utilization in the device. This object is deprecated; users need not set it. Reading snAgGblCpuUtilData resets all the counters.')
snAgGblTelnetTimeout = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 37), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblTelnetTimeout.setStatus('current')
if mibBuilder.loadTexts: snAgGblTelnetTimeout.setDescription('Telnet session idle timeout value. Each value unit is one minute. FastIron and NetIron platforms support values up to 240 minutes. On these platforms, value 0 means telnet sessions do not time out.')
snAgGblEnableWebMgmt = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 38), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblEnableWebMgmt.setStatus('current')
if mibBuilder.loadTexts: snAgGblEnableWebMgmt.setDescription('Enable/disable web management.')
snAgGblSecurityLevelBinding = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 39), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblSecurityLevelBinding.setStatus('current')
if mibBuilder.loadTexts: snAgGblSecurityLevelBinding.setDescription('This represents the binding of a given password to a security level. A value of 255 indicates an invalid binding.')
snAgGblEnableSLB = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 40), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblEnableSLB.setStatus('current')
if mibBuilder.loadTexts: snAgGblEnableSLB.setDescription('Enable/disable Server Load Balancing.')
snAgSoftwareFeature = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 41), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSoftwareFeature.setStatus('current')
if mibBuilder.loadTexts: snAgSoftwareFeature.setDescription('A bit string representing the software features of the running switch/router image. These features were conditionally compiled into different product images. For each bit, a value of 1 means the feature exists and a value of 0 means it does not. octet 0, bit 0 - RMON octet 0, bit 1 - ipx switching octet 0, bit 2 - server load balancing octet 0, bit 3 - layer 3 filter in switch octet 0, bit 4 - ipx routing octet 0, bit 5 - appletalk routing octet 0, bit 6 - ip multicast routing octet 0, bit 7 - local access control octet 1, bit 0 - BGP routing octet 1, bit 1 - loopback interface octet 1, bit 2 - BigIron multi-management module octet 1, bit 3 - BigIron SYSIF II octet 1, bit 4 - BigIron POS support octet 1, bit 5 - appletalk cable vlan octet 1, bit 6 - 64 subnet octet 1, bit 7 - multi-slot trunk octet 2, bit 0 - TACACS octet 2, bit 1 - Gigabit Ethernet port auto-negotiation mode octet 2, bit 2 - FSRP octet 2, bit 3 - Exodus requested OSPF enhancement octet 2, bit 4 - OSPF NSSA octet 2, bit 5 - POS octet 2, bit 6 - QOS octet 2, bit 7 - Single Span octet 3, bit 0 - Fast Span octet 3, bit 1 - Base L3 octet 3, bit 2 - static log buffer octet 3, bit 3 - L2 POS octet 3, bit 4 - BI15K octet 3, bit 5 - L2 ATM octet 3, bit 6 - ATM octet 3, bit 7 - NETFLOW octet 4, bit 0 - SFLOW octet 4, bit 1 - GVRP octet 4, bit 2 - GARP octet 4, bit 3 - dynamic trunk octet 4, bit 4 - IGC 8G octet 4, bit 5 - rate limit octet 4, bit 6 - IPC rate limit octet 4, bit 7 - MPLS octet 5, bit 0 - ISIS octet 5, bit 1 - link aggregation octet 5, bit 2 - port dual mode octet 5, bit 3 - private vlan octet 5, bit 4 - MBGP octet 5, bit 5 - ipv6 protocol vlan octet 5, bit 6 - X10G octet 5, bit 7 - fastiron edge switch/router (gridiron) octet 6, bit 0 - FDP octet 6, bit 1 - port tag type octet 6, bit 2 - wireless capable octet 6, bit 3 - snSwPortVlanId object has changed from read-only to read-write octet 6, bit 4 - LLDP Bit 0 is the least significant bit of an octet, and bit 7 is the most significant bit of an octet.')
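# --- Editor's note (illustrative sketch): decoding the snAgSoftwareFeature bit string
# per the octet/bit map above; bit 0 is the least significant bit of each octet. The
# feature table below is a deliberately partial subset for demonstration.
_EXAMPLE_FEATURE_BITS = {
    (0, 0): 'RMON',
    (0, 2): 'server load balancing',
    (1, 0): 'BGP routing',
    (4, 0): 'SFLOW',
    (5, 0): 'ISIS',
    (6, 4): 'LLDP',
}
def _decode_software_features(raw, feature_bits=_EXAMPLE_FEATURE_BITS):
    """Return names of the set feature bits found in the octet string (sketch)."""
    octets = bytearray(raw)
    return [name
            for (octet_index, bit_index), name in sorted(feature_bits.items())
            if octet_index < len(octets) and (octets[octet_index] >> bit_index) & 1]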
snAgGblEnableModuleInsertedTrap = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblEnableModuleInsertedTrap.setStatus('current')
if mibBuilder.loadTexts: snAgGblEnableModuleInsertedTrap.setDescription('Indicates whether the SNMP agent process is permitted to generate hardware module inserted to chassis traps.')
snAgGblEnableModuleRemovedTrap = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 43), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblEnableModuleRemovedTrap.setStatus('current')
if mibBuilder.loadTexts: snAgGblEnableModuleRemovedTrap.setDescription('Indicates whether the SNMP agent process is permitted to generate hardware module removed from chassis traps.')
snAgGblTrapMessage = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 44), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblTrapMessage.setStatus('current')
if mibBuilder.loadTexts: snAgGblTrapMessage.setDescription('A generic trap message string.')
snAgGblEnableTelnetServer = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 45), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblEnableTelnetServer.setStatus('current')
if mibBuilder.loadTexts: snAgGblEnableTelnetServer.setDescription('Enable or disable telnet server in device.')
snAgGblTelnetPassword = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 46), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 48))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblTelnetPassword.setStatus('current')
if mibBuilder.loadTexts: snAgGblTelnetPassword.setDescription("Telnet Access Password; this object is only useful for write operations. A read operation gets a null string. Set operation on this object is allowed only if command 'password-change any' is configured on the target device. Another configuration affecting this is 'no snmp-server pw-check'. If it is configured on the device, there is no need to pass the additional varbind snAgGblPassword. By default, 'snmp-server pw-check' is true, and thus snAgGblPassword needs to accompany this object in the same Set PDU.")
snAgBuildDate = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 47), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgBuildDate.setStatus('current')
if mibBuilder.loadTexts: snAgBuildDate.setDescription('The build date of the software.')
snAgBuildtime = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 48), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgBuildtime.setStatus('current')
if mibBuilder.loadTexts: snAgBuildtime.setDescription('The build time of the software.')
snAgBuildVer = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 49), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgBuildVer.setStatus('current')
if mibBuilder.loadTexts: snAgBuildVer.setDescription('The image label of the built software.')
snAgGblCpuUtil1SecAvg = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 50), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblCpuUtil1SecAvg.setStatus('current')
if mibBuilder.loadTexts: snAgGblCpuUtil1SecAvg.setDescription('The statistics collection of 1 second CPU utilization.')
snAgGblCpuUtil5SecAvg = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 51), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblCpuUtil5SecAvg.setStatus('current')
if mibBuilder.loadTexts: snAgGblCpuUtil5SecAvg.setDescription('The statistics collection of 5 second CPU utilization.')
snAgGblCpuUtil1MinAvg = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 52), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblCpuUtil1MinAvg.setStatus('current')
if mibBuilder.loadTexts: snAgGblCpuUtil1MinAvg.setDescription('The statistics collection of 1 minute CPU utilization.')
snAgGblDynMemUtil = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 53), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblDynMemUtil.setStatus('deprecated')
if mibBuilder.loadTexts: snAgGblDynMemUtil.setDescription('The system dynamic memory utilization, in unit of percentage. Deprecated: Refer to snAgSystemDRAMUtil. For NI platforms, refer to snAgentBrdMemoryUtil100thPercent')
snAgGblDynMemTotal = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 54), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblDynMemTotal.setStatus('deprecated')
if mibBuilder.loadTexts: snAgGblDynMemTotal.setDescription('The total amount of system dynamic memory, in number of bytes. Deprecated: Refer to snAgSystemDRAMTotal. For NI platforms, refer to snAgentBrdMemoryTotal')
snAgGblDynMemFree = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 55), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblDynMemFree.setStatus('deprecated')
if mibBuilder.loadTexts: snAgGblDynMemFree.setDescription('The free amount of system dynamic memory, in number of bytes. Deprecated: Refer to snAgSystemDRAMFree. For NI platforms, refer to snAgentBrdMemoryAvailable')
snAgImgLoadSPModuleType = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 56), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("other", 1), ("vm1", 2), ("pos12", 3), ("pos48", 4), ("atm", 5), ("gignpa", 6), ("lp", 7)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgImgLoadSPModuleType.setStatus('current')
if mibBuilder.loadTexts: snAgImgLoadSPModuleType.setDescription('The type of the switch processor module that receives the downloaded image.')
snAgImgLoadSPModuleNumber = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 57), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgImgLoadSPModuleNumber.setStatus('current')
if mibBuilder.loadTexts: snAgImgLoadSPModuleNumber.setDescription('The slot number of the switch processor module that receives the downloaded image. Setting the value 0 applies to all SP modules.')
snAgTrapHoldTime = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 58), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTrapHoldTime.setStatus('current')
if mibBuilder.loadTexts: snAgTrapHoldTime.setDescription('The time in seconds for which traps will be withheld during system initialization.')
snAgSFlowSourceInterface = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 59), InterfaceIndex()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSFlowSourceInterface.setStatus('current')
if mibBuilder.loadTexts: snAgSFlowSourceInterface.setDescription('Use the ifIndex value here to specify the source interface to be used for sFlow packets. The interface should have an IP address configured on it. A value of 0 indicates that a source interface has not been configured for sFlow. Port 65534 is used to specify a null port.')
snAgGblTelnetLoginTimeout = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 60), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblTelnetLoginTimeout.setStatus('current')
if mibBuilder.loadTexts: snAgGblTelnetLoginTimeout.setDescription('Telnet session login timeout value in minutes. FastIron and NetIron platforms support values up to 10 minutes, with defaults of 2 minutes for FastIron and 1 minute for NetIron.')
snAgGblBannerExec = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 61), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblBannerExec.setStatus('current')
if mibBuilder.loadTexts: snAgGblBannerExec.setDescription("EXEC process creation banner. Insert newlines using '\\n' within the string.")
snAgGblBannerIncoming = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 62), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblBannerIncoming.setStatus('current')
if mibBuilder.loadTexts: snAgGblBannerIncoming.setDescription("Incoming terminal line banner. Insert newlines using '\\n' within the string.")
snAgGblBannerMotd = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 63), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblBannerMotd.setStatus('current')
if mibBuilder.loadTexts: snAgGblBannerMotd.setDescription("Message-of-the-day banner. Insert newlines using '\\n' within the string.")
snAgWebMgmtServerTcpPort = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 64), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgWebMgmtServerTcpPort.setStatus('current')
if mibBuilder.loadTexts: snAgWebMgmtServerTcpPort.setDescription('The TCP port number of web management interface.')
snAgTftpServerAddrType = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 65), InetAddressType().clone('ipv4')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTftpServerAddrType.setStatus('current')
if mibBuilder.loadTexts: snAgTftpServerAddrType.setDescription('TFTP server IP address Type. Supported address types are ipv4(1) and ipv6(2)')
snAgTftpServerAddr = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 66), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTftpServerAddr.setStatus('current')
if mibBuilder.loadTexts: snAgTftpServerAddr.setDescription('TFTP server IP address.')
snAgGblDeleteFirstBeforeDownload = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 67), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgGblDeleteFirstBeforeDownload.setStatus('current')
if mibBuilder.loadTexts: snAgGblDeleteFirstBeforeDownload.setDescription('When set to true, deletes the existing target file on the management module flash. This object can be set to true only when snAgImgLoad is set to downloadMPPrimary(20), downloadMPSecondary(22), downloadSPPrimary(24), downloadSPSecondary(25) or downloadMPMonitor(31) in the same SNMP SET request PDU. This object is reset to false after a successful or unsuccessful download of the specified file to flash. Reading this object returns false(2).')
snAgGblPasswordCheckMode = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 1, 68), EnabledStatus().clone('enabled')).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgGblPasswordCheckMode.setStatus('current')
if mibBuilder.loadTexts: snAgGblPasswordCheckMode.setDescription('enabled(1) - The password checking for SNMP set requests is enabled. The default value is enabled. disabled(2) - The password checking for SNMP set requests is disabled. When enabled, every image/file-related MIB object SET request PDU must include the password using snAgGblPassword.')
snAgentBrdTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1), )
if mibBuilder.loadTexts: snAgentBrdTable.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdTable.setDescription('A table of each physical board information.')
snAgentBrdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentBrdIndex"))
if mibBuilder.loadTexts: snAgentBrdEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdEntry.setDescription('A row in the Agent Board table.')
snAgentBrdIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdIndex.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdIndex.setDescription('The index to the Agent Board table.')
snAgentBrdMainBrdDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdMainBrdDescription.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdMainBrdDescription.setDescription('The main board description string.')
snAgentBrdMainBrdId = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 3), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdMainBrdId.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdMainBrdId.setDescription('The main board identifier, which can uniquely identify a board type. It is an encoded octet string with the following meaning: octet 0 - octet string format version, which identifies the format of this string. If format version octet has the value 2, the octets after the version octet have the following meaning: octet 1 - product type: BI_WG 0x57 BI_BB 0x42 BI_NI 0x4E BI_NI2 0x32 NI_M4 0x4D BI_SLB 0x53 octet 2 - module type: MASTER_FIBER_8G 0x0 MASTER_FIBER_4G 0x1 MASTER_COPPER_16 0x2 SLAVE_FIBER_4G 0x3 FI_MASTER_FIBER_2G 0x4 FI_MASTER_FIBER_4G 0x5 MASTER_COPPER_8G 0x6 FI_MASTER_FIBER_8G 0x7 SLAVE_FIBER_8G 0x8 MASTER_COPPER_12_2 0x9 SLAVE_COPPER_24 0xA FI_SLAVE_COPPER_24 0xB SLAVE_100FX_8 0xD SLAVE_100FX_16 0xC SLAVE_COPPER_8G 0xE SLAVE_COPPER_16_2 0xF STACK_FIBER_8G 0x10 STACK_COPPER_8G 0x11 MASTER_FIBER_2G 0x12 SLAVE_100FX_24 0x13 MASTER_FIBER_0G 0x14 POS_622M 0x15 POS_155M 0x16 SLAVE_FIBER_2G 0x17 SLAVE_COPPER_2G 0x18 FI_SLAVE_FIBER_2G 0x19 FI_SLAVE_FIBER_4G 0x1A FI_SLAVE_FIBER_8G 0x1B FI_SLAVE_COPPER_8G 0x1C FI_MASTER_COPPER_8G 0x1D POS_155M2P 0x1E FI_MASTER_COPPER_4G 0x1F FI_MASTER_COPPER_2G 0x20 MASTER_COPPER_4G 0x21 MASTER_COPPER_2G 0x22 MASTER_M4_8G 0x23 MASTER_M4_4G 0x24 MASTER_M4_2G 0x25 MASTER_M4_0G 0x26 MASTER_M5_0G 0x27 POS_2488M 0x28 SLAVE_M5_0G 0x29 POS_N2488M 0x2A STACK_IPC_48_2 0x2B SLAVE_NPA_FIBER_4G 0x2C ATM_2PORT 0x2D ATM_4PORT 0x2E SLAVE_FIBER_10G 0x2F STACK_FES_48_2 0x30 STACK_FES_24_2 0x31 STACK_FES_96_4 0x32 STACK_FES_12G 0x33 STACK_FESX_24G 0x34 STACK_FESX_24_2_G 0x35 STACK_FESX_24_1_G 0x36 STACK_FESX_48G 0x37 STACK_FESX_48_2_G 0x38 STACK_FESX_48_1_G 0x39 SUPERX_FI_MGMT 0x40 SUPERX_FI_2P10G 0x41 SUPERX_FI_24GC 0x42 SUPERX_FI_24GF 0x43 SUPERX_FI_2P10G_WAN 0x44 SUPERX_FI_MGMT_II 0x4a SLAVE_JC_48E 0xC3 SLAVE_JC_48T 0xC4 MASTER_JC_M4_8G 0xC5 SLAVE_JC_8G 0xC6 SLAVE_JC_B16GF 0xC8 MASTER_JC_B2404 0xC9 SLAVE_JC_B16GC 0xCA SLAVE_JC_B24FX 0xCE octet 3 - processor type, PVR_M603=3, PVR_M604=4, PVR_M603E=6, PVR_M603EV=7, PVR_M750=8, PVR_M604E=9, PVR_M8245=81 octet 4 to octet 5 - processor speed in MHz octet 6 - MAC type: MAC_NONE=0 MAC_SEEQ_10_100=1, MAC_DEC_10_100=2, MAC_3COM_10_100=3, MAC_X10GMAC_10000=4, MAC_SEEQ_1000=5, MAC_GMAC_1000=6, MAC_VLSI_1000=7 octet 7 - PHY type, PHY_NONE=0, PHY_QSI=1, PHY_BROADCOM=2, PHY_ICS=3, PHY_NATIONAL=4, PHY_LEVEL1=6, PHY_BROADCOM_10_100=7, PHY_LEVEL24=8, PHY_BROADCOM_10000=9 (for 10G), PHY_3COM_10_100=9 (for others) octet 8 - port type, COPPER=0, FIBER=1 octet 9 - fiber port type, NONFIBER=0, SX_FIBER=1, LX_FIBER=2, LHX_FIBER=3, LX_SX_FIBER=4, LHB_FIBER=5 octet 10 to octet 13 - DRAM size in KBytes octet 14 to octet 17 - boot flash size in KBytes octet 18 to octet 21 - code flash size in KBytes octet 22 to octet 27 - serial number. octet 28 - chassis backplane type. chassis4000 = 0x00 chassis8000 = 0x02 chassis15000 = 0x01 chassisFISX = 0x04 Turbo8 = 0x07 (stack2) FastIron2 = 0x06 (stack1)')
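# --- Illustrative sketch (not part of the generated MIB bindings) ---------
# Decoding a snAgentBrdMainBrdId value per the format-version-2 layout in
# the description above. The field names, the big-endian byte order for the
# multi-byte fields, and the Python 3 bytes input are assumptions made for
# illustration only.
import struct

def decode_main_brd_id(octets):
    """Decode a format-version-2 board identifier (expects >= 29 octets)."""
    if len(octets) < 29 or octets[0] != 2:
        raise ValueError('unsupported board-id format version or length')
    return {
        'product_type': octets[1],
        'module_type': octets[2],
        'processor_type': octets[3],
        'processor_mhz': struct.unpack('>H', octets[4:6])[0],
        'mac_type': octets[6],
        'phy_type': octets[7],
        'port_type': octets[8],            # COPPER=0, FIBER=1
        'fiber_port_type': octets[9],
        'dram_kbytes': struct.unpack('>I', octets[10:14])[0],
        'boot_flash_kbytes': struct.unpack('>I', octets[14:18])[0],
        'code_flash_kbytes': struct.unpack('>I', octets[18:22])[0],
        'serial_number': octets[22:28],
        'chassis_backplane_type': octets[28],
    }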
snAgentBrdMainPortTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdMainPortTotal.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdMainPortTotal.setDescription('The total number of ports for the main board.')
snAgentBrdExpBrdDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdExpBrdDescription.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdExpBrdDescription.setDescription('The expansion board description string. Expansion boards are boards attached to the main board.')
snAgentBrdExpBrdId = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 6), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdExpBrdId.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdExpBrdId.setDescription('The expansion board identifier. Expansion boards are boards attached to the main board. It is an encoded octet string with the following meaning: octet 0 - octet string format version, which identifies the format of this string. If format version octet has the value 1, the octets after the version octet have the following meaning: octet 1 - expansion board type, HUNDRED_MEG_1PORT=1, HUNDRED_MEG_2PORT=2, HUNDRED_MEG_1PORT_COPPER=3, HUNDRED_MEG_2PORT_COPPER=4, HUNDRED_MEG_2PORT_LX=5, GIGA_1PORT=8, GIGA_2PORT=9 octet 2 - fiber port type, NONFIBER=0, SX_FIBER=1, LX_FIBER=2, LHX_FIBER=3, LX_SX_FIBER=4, LHB_FIBER=5')
snAgentBrdExpPortTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 24))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdExpPortTotal.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdExpPortTotal.setDescription('The total number of ports for the expansion board.')
snAgentBrdStatusLeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdStatusLeds.setStatus('deprecated')
if mibBuilder.loadTexts: snAgentBrdStatusLeds.setDescription('A bit array that contains the value of the front panel status LEDs. This is a bit-map; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 32, which means 32 port status LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (Link off) 1 on (Link on) The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 (It was obsoleted after release 07100, replaced by snAgentBrdStatusLedString)')
snAgentBrdTrafficLeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdTrafficLeds.setStatus('deprecated')
if mibBuilder.loadTexts: snAgentBrdTrafficLeds.setDescription('A bit array that contains the value of the front panel traffic LEDs. This is a packed bit string; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24, which means 24 port traffic LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (no traffic) 1 on (traffic) The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 (It was obsoleted after release 07100, replaced by snAgentBrdTrafficLedString)')
snAgentBrdMediaLeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdMediaLeds.setStatus('deprecated')
if mibBuilder.loadTexts: snAgentBrdMediaLeds.setDescription('A bit array that contains the value of the front panel media LEDs. This is a packed bit string; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24, which means 24 port media LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning --------- ------- 0 half duplex 1 full duplex The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 (It was obsoleted after release 07100, replaced by snAgentBrdMediaLedString)')
snAgentBrdSpeedLeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdSpeedLeds.setStatus('deprecated')
if mibBuilder.loadTexts: snAgentBrdSpeedLeds.setDescription('A bit array that contains the value of the front panel speed LEDs. This is a packed bit string; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24, which means 24 port speed LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning --------- ------- 0 10 MBit 1 100 MBit The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 (It was obsoleted after release 07100, replaced by snAgentBrdSpeedLedString)')
snAgentBrdModuleStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 3, 4, 8, 9, 10, 11))).clone(namedValues=NamedValues(("moduleEmpty", 0), ("moduleGoingDown", 2), ("moduleRejected", 3), ("moduleBad", 4), ("moduleConfigured", 8), ("moduleComingUp", 9), ("moduleRunning", 10), ("moduleBlocked", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdModuleStatus.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdModuleStatus.setDescription('By default, this object is set to moduleEmpty(0). moduleEmpty(0) ......... The slot of the chassis is empty. moduleGoingDown(2) ..... The module is going down. moduleRejected(3) ...... The module is being rejected due to a wrong configuration. moduleBad(4) ........... The module hardware is bad. moduleConfigured(8) .... The module is configured (stacking). moduleComingUp(9) ...... The module is in its power-up cycle. moduleRunning(10) ...... The module is running. moduleBlocked(11) ...... The module is blocked, for a full-height card.')
snAgentBrdRedundantStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("active", 2), ("standby", 3), ("crashed", 4), ("comingUp", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdRedundantStatus.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdRedundantStatus.setDescription('The redundant status of a module. Non-management modules always return other(1). Management modules return the remaining states.')
snAgentBrdAlarmLeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdAlarmLeds.setStatus('deprecated')
if mibBuilder.loadTexts: snAgentBrdAlarmLeds.setDescription('A bit array that contains the value of the front panel alarm LEDs (for POS Module Only). This is a packed bit string; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24, which means 24 port alarm LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning --------- ------- 0 no alarm 1 alarm The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 (It was obsoleted after release 07100, replaced by snAgentBrdAlarmLedString)')
snAgentBrdTxTrafficLeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdTxTrafficLeds.setStatus('deprecated')
if mibBuilder.loadTexts: snAgentBrdTxTrafficLeds.setDescription('A bit array that contains the value of the front panel transmit traffic LEDs (for POS Module Only). This is a packed bit string; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24, which means 24 port transmit traffic LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (no transmit traffic) 1 on (transmit traffic) The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 (It was obsoleted after release 07100, replaced by snAgentBrdTxTrafficLedString)')
snAgentBrdRxTrafficLeds = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdRxTrafficLeds.setStatus('deprecated')
if mibBuilder.loadTexts: snAgentBrdRxTrafficLeds.setDescription('A bit array that contains the value of the front panel receive traffic LEDs (for POS Module Only). This is a packed bit string; each LED is encoded into 1 bit for each switch port. The maximum number of ports in one chassis is 24, which means 24 port receive traffic LEDs. The expansion port number always begins from the last main port number. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (no receive traffic) 1 on (receive traffic) The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24 (It was obsoleted after release 07100, replaced by snAgentBrdRxTrafficLedString)')
snAgentBrdStatusLedString = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 17), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdStatusLedString.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdStatusLedString.setDescription('A bit array that contains the value of the front panel status LEDs. This is a bit-map; each LED is encoded into 1 bit for each switch port. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (Link off) 1 on (Link on) The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24')
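# --- Illustrative sketch (not part of the generated MIB bindings) ---------
# Unpacking a per-port LED bit string such as snAgentBrdStatusLedString,
# following the byte/bit layout described above (bit 0 of byte 1 maps to
# port 1). A Python 3 bytes input is assumed.
def decode_led_string(octets):
    """Return a list of booleans, one per port, True meaning LED on."""
    states = []
    for byte in octets:
        for bit in range(8):               # bit 0 -> lowest port in the byte
            states.append(bool(byte & (1 << bit)))
    return states

# Example: decode_led_string(b'\x05') -> ports 1 and 3 on, ports 2 and 4-8 off.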
snAgentBrdTrafficLedString = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 18), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdTrafficLedString.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdTrafficLedString.setDescription('A bit array that contains the value of the front panel traffic LEDs. This is a packed bit string; each LED is encoded into 1 bit for each switch port. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (no traffic) 1 on (traffic) The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24')
snAgentBrdMediaLedString = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 19), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdMediaLedString.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdMediaLedString.setDescription('A bit array that contains the value of the front panel media LEDs. This is a packed bit string; each LED is encoded into 1 bit for each switch port. The following shows the meaning of each bit for each switch port: bit value meaning --------- ------- 0 half duplex 1 full duplex The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24')
snAgentBrdSpeedLedString = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 20), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdSpeedLedString.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdSpeedLedString.setDescription('A bit array that contains the value of the front panel speed LEDs. This is a packed bit string; each LED is encoded into 1 bit for each switch port. The following shows the meaning of each bit for each switch port: bit value meaning --------- ------- 0 10 MBit 1 100 MBit The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24')
snAgentBrdAlarmLedString = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 21), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdAlarmLedString.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdAlarmLedString.setDescription('A bit array that contains the value of the front panel alarm LEDs (for POS Module Only). This is a packed bit string; each LED is encoded into 1 bit for each switch port. The following shows the meaning of each bit for each switch port: bit value meaning --------- ------- 0 no alarm 1 alarm The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24')
snAgentBrdTxTrafficLedString = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 22), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdTxTrafficLedString.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdTxTrafficLedString.setDescription('A bit array that contains the value of the front panel transmit traffic LEDs (for POS Module Only). This is a packed bit string; each LED is encoded into 1 bit for each switch port. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (no transmit traffic) 1 on (transmit traffic) The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24')
snAgentBrdRxTrafficLedString = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 23), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdRxTrafficLedString.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdRxTrafficLedString.setDescription('A bit array that contains the value of the front panel receive traffic LEDs (for POS Module Only). This is a packed bit string; each LED is encoded into 1 bit for each switch port. The following shows the meaning of each bit for each switch port: bit value meaning -------- --------- 0 off (no receive traffic) 1 on (receive traffic) The bitmap of LEDs is as follows: (Port1) (Port4) (Port8) Bit (Bit0) (Bit3) (Bit7) Byte 1: LED1 LED2 LED3 LED4 LED5 LED6 LED7 LED8 Byte 2: LED9 LED10 LED11 LED12 LED13 LED14 LED15 LED16 Byte 3: LED17 LED18 LED19 LED20 LED21 LED22 LED23 LED24')
snAgentBrdMemoryTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 24), CounterBasedGauge64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdMemoryTotal.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdMemoryTotal.setDescription('The total memory in bytes within this module.')
snAgentBrdMemoryAvailable = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 25), CounterBasedGauge64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdMemoryAvailable.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdMemoryAvailable.setDescription('The total memory in bytes available for use within this module.')
snAgentBrdSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 26), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdSerialNumber.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdSerialNumber.setDescription('The board serial number. A zero-length string indicates that the module serial number has not been programmed into the EEPROM, or that the module does not support a serial number in EEPROM.')
snAgentBrdPartNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 27), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdPartNumber.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdPartNumber.setDescription('The board part number. A zero-length string indicates that the module part number has not been programmed into the EEPROM, or that the module does not support a part number in EEPROM.')
snAgentBrdMemoryUtil100thPercent = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 28), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdMemoryUtil100thPercent.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdMemoryUtil100thPercent.setDescription('Dynamic memory utilization within this module in units of one-hundredth of a percent.')
snAgentBrdUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 1, 1, 29), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrdUpTime.setStatus('current')
if mibBuilder.loadTexts: snAgentBrdUpTime.setDescription('Uptime for this module in units of one-hundredth of a second. This value is only valid if the value of snAgentBrdModuleStatus is moduleRunning.')
snAgentBrd2Table = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 2), )
if mibBuilder.loadTexts: snAgentBrd2Table.setStatus('current')
if mibBuilder.loadTexts: snAgentBrd2Table.setDescription('A table of each physical board information for each unit.')
snAgentBrd2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentBrd2Unit"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentBrd2Slot"))
if mibBuilder.loadTexts: snAgentBrd2Entry.setStatus('current')
if mibBuilder.loadTexts: snAgentBrd2Entry.setDescription('A row in the Agent Board table.')
snAgentBrd2Unit = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrd2Unit.setStatus('current')
if mibBuilder.loadTexts: snAgentBrd2Unit.setDescription('The unit index to the Agent Board table.')
snAgentBrd2Slot = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrd2Slot.setStatus('current')
if mibBuilder.loadTexts: snAgentBrd2Slot.setDescription('The slot index to the Agent Board table.')
snAgentBrd2MainBrdDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrd2MainBrdDescription.setStatus('current')
if mibBuilder.loadTexts: snAgentBrd2MainBrdDescription.setDescription('The main board description string.')
snAgentBrd2MainBrdId = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 2, 1, 4), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrd2MainBrdId.setStatus('current')
if mibBuilder.loadTexts: snAgentBrd2MainBrdId.setDescription('The main board identifier, which can uniquely identify a board type. It is an encoded octet string with the following meaning: octet 0 - octet string format version, which identifies the format of this string. If format version octet has the value 2, the octets after the version octet have the following meaning: octet 1 - product type: BI_WG 0x57 BI_BB 0x42 BI_NI 0x4E BI_NI2 0x32 NI_M4 0x4D BI_SLB 0x53 octet 2 - module type: MASTER_FIBER_8G 0x0 MASTER_FIBER_4G 0x1 MASTER_COPPER_16 0x2 SLAVE_FIBER_4G 0x3 FI_MASTER_FIBER_2G 0x4 FI_MASTER_FIBER_4G 0x5 MASTER_COPPER_8G 0x6 FI_MASTER_FIBER_8G 0x7 SLAVE_FIBER_8G 0x8 MASTER_COPPER_12_2 0x9 SLAVE_COPPER_24 0xA FI_SLAVE_COPPER_24 0xB SLAVE_100FX_8 0xD SLAVE_100FX_16 0xC SLAVE_COPPER_8G 0xE SLAVE_COPPER_16_2 0xF STACK_FIBER_8G 0x10 STACK_COPPER_8G 0x11 MASTER_FIBER_2G 0x12 SLAVE_100FX_24 0x13 MASTER_FIBER_0G 0x14 POS_622M 0x15 POS_155M 0x16 SLAVE_FIBER_2G 0x17 SLAVE_COPPER_2G 0x18 FI_SLAVE_FIBER_2G 0x19 FI_SLAVE_FIBER_4G 0x1A FI_SLAVE_FIBER_8G 0x1B FI_SLAVE_COPPER_8G 0x1C FI_MASTER_COPPER_8G 0x1D POS_155M2P 0x1E FI_MASTER_COPPER_4G 0x1F FI_MASTER_COPPER_2G 0x20 MASTER_COPPER_4G 0x21 MASTER_COPPER_2G 0x22 MASTER_M4_8G 0x23 MASTER_M4_4G 0x24 MASTER_M4_2G 0x25 MASTER_M4_0G 0x26 MASTER_M5_0G 0x27 POS_2488M 0x28 SLAVE_M5_0G 0x29 POS_N2488M 0x2A STACK_IPC_48_2 0x2B SLAVE_NPA_FIBER_4G 0x2C ATM_2PORT 0x2D ATM_4PORT 0x2E SLAVE_FIBER_10G 0x2F STACK_FES_48_2 0x30 STACK_FES_24_2 0x31 STACK_FES_96_4 0x32 STACK_FES_12G 0x33 STACK_FESX_24G 0x34 STACK_FESX_24_2_G 0x35 STACK_FESX_24_1_G 0x36 STACK_FESX_48G 0x37 STACK_FESX_48_2_G 0x38 STACK_FESX_48_1_G 0x39 SUPERX_FI_MGMT 0x40 SUPERX_FI_2P10G 0x41 SUPERX_FI_24GC 0x42 SUPERX_FI_24GF 0x43 SUPERX_FI_2P10G_WAN 0x44 SUPERX_FI_MGMT_II 0x4a SLAVE_JC_48E 0xC3 SLAVE_JC_48T 0xC4 MASTER_JC_M4_8G 0xC5 SLAVE_JC_8G 0xC6 SLAVE_JC_B16GF 0xC8 MASTER_JC_B2404 0xC9 SLAVE_JC_B16GC 0xCA octet 3 - processor type, PVR_M603=3, PVR_M604=4, PVR_M603E=6, PVR_M603EV=7, PVR_M750=8, PVR_M604E=9, PVR_M8245=81 octet 4 to octet 5 - processor speed in MHz octet 6 - MAC type: MAC_NONE=0 MAC_SEEQ_10_100=1, MAC_DEC_10_100=2, MAC_3COM_10_100=3, MAC_X10GMAC_10000=4, MAC_SEEQ_1000=5, MAC_GMAC_1000=6, MAC_VLSI_1000=7 octet 7 - PHY type, PHY_NONE=0, PHY_QSI=1, PHY_BROADCOM=2, PHY_ICS=3, PHY_NATIONAL=4, PHY_LEVEL1=6, PHY_BROADCOM_10_100=7, PHY_LEVEL24=8, PHY_BROADCOM_10000=9 (for 10G), PHY_3COM_10_100=9 (for others) octet 8 - port type, COPPER=0, FIBER=1 octet 9 - fiber port type, NONFIBER=0, SX_FIBER=1, LX_FIBER=2, LHX_FIBER=3, LX_SX_FIBER=4, LHB_FIBER=5 octet 10 to octet 13 - DRAM size in KBytes octet 14 to octet 17 - boot flash size in KBytes octet 18 to octet 21 - code flash size in KBytes octet 22 to octet 27 - serial number. octet 28 - chassis backplane type. chassis4000 = 0x00 chassis8000 = 0x02 chassis15000 = 0x01 chassisFISX = 0x04 Turbo8 = 0x07 (stack2) FastIron2 = 0x06 (stack1)')
snAgentBrd2MainPortTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrd2MainPortTotal.setStatus('current')
if mibBuilder.loadTexts: snAgentBrd2MainPortTotal.setDescription('The total number of ports for the main board.')
snAgentBrd2ModuleStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 2, 3, 4, 8, 9, 10, 11))).clone(namedValues=NamedValues(("moduleEmpty", 0), ("moduleGoingDown", 2), ("moduleRejected", 3), ("moduleBad", 4), ("moduleConfigured", 8), ("moduleComingUp", 9), ("moduleRunning", 10), ("moduleBlocked", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrd2ModuleStatus.setStatus('current')
if mibBuilder.loadTexts: snAgentBrd2ModuleStatus.setDescription('By default, this object is set to moduleEmpty(0). moduleEmpty(0) ......... The slot of the chassis is empty. moduleGoingDown(2) ..... The module is going down. moduleRejected(3) ...... The module is being rejected due to a wrong configuration. moduleBad(4) ........... The module hardware is bad. moduleConfigured(8) .... The module is configured (stacking). moduleComingUp(9) ...... The module is in its power-up cycle. moduleRunning(10) ...... The module is running. moduleBlocked(11) ...... The module is blocked, for a full-height card.')
snAgentBrd2RedundantStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("active", 2), ("standby", 3), ("crashed", 4), ("comingUp", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentBrd2RedundantStatus.setStatus('current')
if mibBuilder.loadTexts: snAgentBrd2RedundantStatus.setDescription('The redundant status of a module. Non-management modules always return other(1). Management modules return the remaining states.')
snAgTrpRcvrTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3, 1), )
if mibBuilder.loadTexts: snAgTrpRcvrTable.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTrpRcvrTable.setDescription('A table of managers to which traps are sent.')
snAgTrpRcvrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgTrpRcvrIndex"))
if mibBuilder.loadTexts: snAgTrpRcvrEntry.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTrpRcvrEntry.setDescription('A row in the trap receiver table. The column snAgTrpRcvrStatus is used to create and delete rows in the table. Creation requires a SET PDU with the objects snAgTrpRcvrIndex, snAgTrpRcvrIpAddr, snAgTrpRcvrCommunityOrSecurityName and snAgTrpRcvrStatus.')
snAgTrpRcvrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgTrpRcvrIndex.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTrpRcvrIndex.setDescription('The index to the Trap Receiver Table.')
snAgTrpRcvrIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3, 1, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTrpRcvrIpAddr.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTrpRcvrIpAddr.setDescription('The IP address of the SNMP manager that is to receive the trap.')
snAgTrpRcvrCommunityOrSecurityName = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3, 1, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTrpRcvrCommunityOrSecurityName.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTrpRcvrCommunityOrSecurityName.setDescription('Community string to use. In case of USM (SNMPv3) security model, this object is used to provide the security name.')
snAgTrpRcvrStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("valid", 2), ("delete", 3), ("create", 4), ("ignore", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTrpRcvrStatus.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTrpRcvrStatus.setDescription("This object is used to create and delete rows in the table and to control whether they are used. The values that can be written are: ignore(5)...don't use this entry to send traps to at this time delete(3)...deletes the row create(4)...creates a new row If the row exists, then a SET with a value of create(4) returns the error 'badValue'. Deleted rows go away immediately. The following values can be returned on reads: other(1)....some other case valid(2)....the row exists and is valid ignore(5)...don't use this entry to send traps to at this time")
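# --- Illustrative sketch (not part of the generated MIB bindings) ---------
# Creating a trap receiver row with a single SET PDU, as the row-creation
# rule above requires. This assumes pysnmp's high-level API and that this
# compiled MIB is on the pysnmp MIB search path; the host, community and
# index values are placeholders.
from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                          ContextData, ObjectType, ObjectIdentity, setCmd)

def create_trap_receiver(host, community, index, receiver_ip, comm_name):
    """SET snAgTrpRcvrIpAddr, snAgTrpRcvrCommunityOrSecurityName and
    snAgTrpRcvrStatus=create(4) together in one request."""
    error_indication, error_status, _, _ = next(setCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB',
                                  'snAgTrpRcvrIpAddr', index), receiver_ip),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB',
                                  'snAgTrpRcvrCommunityOrSecurityName',
                                  index), comm_name),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB',
                                  'snAgTrpRcvrStatus', index), 4)))
    return error_indication or error_status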
snAgTrpRcvrUDPPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3, 1, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535)).clone(162)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTrpRcvrUDPPort.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTrpRcvrUDPPort.setDescription('UDP port number of the trap receiver.')
snAgTrpRcvrSecurityModel = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("v1", 1), ("v2c", 2), ("usm", 3))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTrpRcvrSecurityModel.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTrpRcvrSecurityModel.setDescription('Version of trap format to be used.')
snAgTrpRcvrSecurityLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("noAuth", 1), ("auth", 2), ("authPriv", 3))).clone(1)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgTrpRcvrSecurityLevel.setStatus('deprecated')
if mibBuilder.loadTexts: snAgTrpRcvrSecurityLevel.setDescription('Used for USM (SNMPv3) security model to specify the level of security. The security name is provided by snAgTrpRcvrCommunityOrSecurityName.')
snAgBootSeqTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 1), )
if mibBuilder.loadTexts: snAgBootSeqTable.setStatus('current')
if mibBuilder.loadTexts: snAgBootSeqTable.setDescription('A table of image load sequence instructions to the boot code. The boot code will start from the instruction of the first valid entry to load the image. If that fails, it will use the second valid entry and so on, until a successful load has completed. Each entry must have a unique instruction; duplicate instructions will be rejected.')
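# --- Illustrative sketch (not part of the generated MIB bindings) ---------
# The fallback semantics described above: the boot code tries each valid
# entry in snAgBootSeqIndex order until one image load succeeds. The
# try_load callable here is a stand-in for the actual load attempt.
def boot_with_fallback(entries, try_load):
    """entries: boot-sequence rows sorted by index; returns the entry
    that loaded successfully, or None if every entry failed."""
    for entry in entries:
        if try_load(entry):
            return entry
    return None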
snAgBootSeqEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgBootSeqIndex"))
if mibBuilder.loadTexts: snAgBootSeqEntry.setStatus('current')
if mibBuilder.loadTexts: snAgBootSeqEntry.setDescription('A row in the boot sequence table.')
snAgBootSeqIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgBootSeqIndex.setStatus('current')
if mibBuilder.loadTexts: snAgBootSeqIndex.setDescription('The index to the boot sequence table.')
snAgBootSeqInstruction = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("fromPrimaryFlash", 1), ("fromSecondaryFlash", 2), ("fromTftpServer", 3), ("fromBootpServer", 4), ("fromPcmciaCard1", 5), ("fromPcmciaCard2", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgBootSeqInstruction.setStatus('current')
if mibBuilder.loadTexts: snAgBootSeqInstruction.setDescription('The instruction for the boot code.')
snAgBootSeqIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 1, 1, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgBootSeqIpAddr.setStatus('current')
if mibBuilder.loadTexts: snAgBootSeqIpAddr.setDescription("The IP address of the TFTP server if snAgBootSeqInstruction was set to 'fromTftpServer'; this object is not used by any other boot instruction.")
snAgBootSeqFilename = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 1, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgBootSeqFilename.setStatus('current')
if mibBuilder.loadTexts: snAgBootSeqFilename.setDescription("The image filename on the TFTP server if snAgBootSeqInstruction was set to 'fromTftpServer'; this object is not used by any other boot instruction.")
snAgBootSeqRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("valid", 2), ("delete", 3), ("create", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgBootSeqRowStatus.setStatus('current')
if mibBuilder.loadTexts: snAgBootSeqRowStatus.setDescription('To create or delete a boot sequence table entry.')
snAgSpBootSeqTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 2), )
if mibBuilder.loadTexts: snAgSpBootSeqTable.setStatus('current')
if mibBuilder.loadTexts: snAgSpBootSeqTable.setDescription('A table of image load sequence instructions to the boot code for the slave (line) modules. The boot code will start from the instruction of the first valid entry to load the image. If that fails, it will use the second valid entry and so on, until a successful load has completed. Each entry must have a unique instruction; duplicate instructions will be rejected.')
snAgSpBootSeqEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgSpBootSeqSpNumber"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgSpBootSeqIndex"))
if mibBuilder.loadTexts: snAgSpBootSeqEntry.setStatus('current')
if mibBuilder.loadTexts: snAgSpBootSeqEntry.setDescription('A row in the slave module boot sequence table.')
snAgSpBootSeqSpNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 32)))
if mibBuilder.loadTexts: snAgSpBootSeqSpNumber.setStatus('current')
if mibBuilder.loadTexts: snAgSpBootSeqSpNumber.setDescription('The slot number of the slave/switch processor module to which this boot sequence applies. Setting the value to 0 applies to all SP modules. Index 0 is valid only for setting, to simplify the set operation for all the modules.')
snAgSpBootSeqIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 2, 1, 2), Integer32())
if mibBuilder.loadTexts: snAgSpBootSeqIndex.setStatus('current')
if mibBuilder.loadTexts: snAgSpBootSeqIndex.setDescription('The index to the boot sequence table.')
snAgSpBootSeqInstruction = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("fromSpPrimaryFlash", 1), ("fromSpSecondaryFlash", 2), ("fromMpPrimaryFlash", 3), ("fromMpSecondaryFlash", 4), ("fromPcmciaCard1", 5), ("fromPcmciaCard2", 6), ("fromTftpServer", 7), ("interactively", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSpBootSeqInstruction.setStatus('current')
if mibBuilder.loadTexts: snAgSpBootSeqInstruction.setDescription('The instruction for the boot code.')
snAgSpBootSeqIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 2, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSpBootSeqIpAddr.setStatus('current')
if mibBuilder.loadTexts: snAgSpBootSeqIpAddr.setDescription("The IP address of the TFTP server if snAgSpBootSeqInstruction was set to 'fromTftpServer'; this object is not used by any other boot instruction.")
snAgSpBootSeqFilename = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 2, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSpBootSeqFilename.setStatus('current')
if mibBuilder.loadTexts: snAgSpBootSeqFilename.setDescription("The image filename on the TFTP server if snAgSpBootSeqInstruction was set to 'fromTftpServer'; this object is not used by any other boot instruction.")
snAgSpBootSeqRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 4, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("valid", 1), ("delete", 2), ("create", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSpBootSeqRowStatus.setStatus('current')
if mibBuilder.loadTexts: snAgSpBootSeqRowStatus.setDescription('To create or delete a boot sequence table entry.')
snAgCfgEosTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 5, 1), )
if mibBuilder.loadTexts: snAgCfgEosTable.setStatus('current')
if mibBuilder.loadTexts: snAgCfgEosTable.setDescription('This table represents the fragmented configuration file data packets, with a checksum included in each row of this table. An SNMP SET represents a configuration file download, and an SNMP GET represents a configuration file upload. This action takes effect only if the SNMP SET of the snAgCfgLoad command is sent along with this table consecutively. The applicable snAgCfgLoad command values are as follows: uploadFromFlashToNMS(23), downloadToFlashFromNMS(24), uploadFromDramToNMS(25), downloadToDramFromNMS(26).')
snAgCfgEosEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 5, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgCfgEosIndex"))
if mibBuilder.loadTexts: snAgCfgEosEntry.setStatus('current')
if mibBuilder.loadTexts: snAgCfgEosEntry.setDescription('An EOS row in the table of encoded octet strings for table snAgCfgEosTable. ')
snAgCfgEosIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 5, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgCfgEosIndex.setStatus('current')
if mibBuilder.loadTexts: snAgCfgEosIndex.setDescription('The index to the configuration file data packet table.')
snAgCfgEosPacket = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 5, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgCfgEosPacket.setStatus('current')
if mibBuilder.loadTexts: snAgCfgEosPacket.setDescription('An encoded octet string. On reads it contains an integral number of configuration file data packets. The size of each encoded octet string is less than or equal to 1000 bytes.')
snAgCfgEosChkSum = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 5, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgCfgEosChkSum.setStatus('current')
if mibBuilder.loadTexts: snAgCfgEosChkSum.setDescription('A checksum of each configuration file data packet.')
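# --- Illustrative sketch (not part of the generated MIB bindings) ---------
# Reassembling an uploaded configuration from snAgCfgEosTable rows after a
# table walk. The per-packet checksum algorithm is not specified in this
# MIB, so verification is left as a stub.
def reassemble_config(rows):
    """rows: iterable of (snAgCfgEosIndex, snAgCfgEosPacket,
    snAgCfgEosChkSum) tuples; returns the concatenated packet data."""
    config = b''
    for _index, packet, _checksum in sorted(rows):
        # checksum verification would go here once the algorithm is known
        config += bytes(packet)
    return config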
snStackGen = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 1))
snStackSecSwitchInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2))
snStackPriSwitchMode = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snStackPriSwitchMode.setStatus('current')
if mibBuilder.loadTexts: snStackPriSwitchMode.setDescription('The Stackable Management Primary Switch mode, either enabled or disabled; the default is disabled. enabled(1)...........primary switch enabled disabled(0)..........primary switch disabled')
snStackMaxSecSwitch = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snStackMaxSecSwitch.setStatus('current')
if mibBuilder.loadTexts: snStackMaxSecSwitch.setDescription('The maximum number of secondary switches allowed in the Stackable Management Group.')
snStackTotalSecSwitch = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snStackTotalSecSwitch.setStatus('current')
if mibBuilder.loadTexts: snStackTotalSecSwitch.setDescription('The total number of secondary switches currently connected to the Stackable Management Group.')
snStackSyncAllSecSwitch = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("normal", 0), ("invalid", 1), ("device", 2), ("global", 3), ("local", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snStackSyncAllSecSwitch.setStatus('current')
if mibBuilder.loadTexts: snStackSyncAllSecSwitch.setDescription('Synchronize all the secondary switches in the Stackable Management Group with the following commands: device(2)...........device related parameters. global(3)...........global parameters. local(4)............local parameters. The return result of the above commands is either normal(0)...........normal condition, or invalid(1)..........invalid result.')
snStackSmSlotIndex = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 8))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snStackSmSlotIndex.setStatus('current')
if mibBuilder.loadTexts: snStackSmSlotIndex.setDescription('Slot 0 is the master slot and slots 1-8 are slaves. All the slot-based MIB data can be retrieved with respect to this slot index, such as the snChassis global MIB.')
snStackFmpSetProcess = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2))).clone(namedValues=NamedValues(("normal", 0), ("pending", 1), ("failure", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snStackFmpSetProcess.setStatus('current')
if mibBuilder.loadTexts: snStackFmpSetProcess.setDescription('normal(0) - The set process is either in an idle state or FMP-SET-SUCCESS state. pending(1) - The pending process is waiting for the result of an FMP-SET. failure(2) - The failure result of an FMP-SET.')
snStackSecSwitchTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1), )
if mibBuilder.loadTexts: snStackSecSwitchTable.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchTable.setDescription('A table of information about each secondary switch in the Stackable Management Group.')
snStackSecSwitchEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snStackSecSwitchIndex"))
if mibBuilder.loadTexts: snStackSecSwitchEntry.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchEntry.setDescription('An entry in the Stackable Management Secondary Switch Information table.')
snStackSecSwitchIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 26))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snStackSecSwitchIndex.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchIndex.setDescription('The secondary switch index must not be greater than the snStackMaxSecSwitch.')
snStackSecSwitchSlotId = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 26))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snStackSecSwitchSlotId.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchSlotId.setDescription('The secondary switch slot ID. It must be set before the configuration command is sent from the primary switch to the secondary switch, either manually or automatically - see snStackSecSwitchCfgCmd.')
snStackSecSwitchPortCnts = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 26))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snStackSecSwitchPortCnts.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchPortCnts.setDescription('The number of ports in this secondary switch.')
snStackSecSwitchEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snStackSecSwitchEnabled.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchEnabled.setDescription('Whether the secondary switch has been selected into the Stackable Management Group.')
snStackSecSwitchAck = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snStackSecSwitchAck.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchAck.setDescription('The secondary switch has sent a response to the primary switch.')
snStackSecSwitchMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 6), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snStackSecSwitchMacAddr.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchMacAddr.setDescription('The secondary switch physical address. The physical address represents a MAC Station.')
snStackSecSwitchSyncCmd = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("normal", 0), ("invalid", 1), ("device", 2), ("global", 3), ("local", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snStackSecSwitchSyncCmd.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchSyncCmd.setDescription('Synchronize this secondary switch in the Stackable Management Group with the following commands: device(2)...........device related parameters. global(3)...........global parameters. local(4)............local parameters. The return result of the above commands is either normal(0)...........normal condition, or invalid(1)..........invalid result.')
snStackSecSwitchIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 8), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snStackSecSwitchIpAddr.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchIpAddr.setDescription('The secondary switch IP address, used for the manual command of snStackSecSwitchCfgCmd.')
snStackSecSwitchSubnetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 9), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snStackSecSwitchSubnetMask.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchSubnetMask.setDescription('The secondary switch IP subnet mask, used for the manual command of snStackSecSwitchCfgCmd.')
snStackSecSwitchCfgCmd = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 5, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("normal", 0), ("invalid", 1), ("auto", 2), ("manual", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snStackSecSwitchCfgCmd.setStatus('current')
if mibBuilder.loadTexts: snStackSecSwitchCfgCmd.setDescription('snStackSecSwitchSlotId must be set before the configuration command is sent from the primary switch to the secondary switch, either manually or automatically. auto(2)...........auto-configuration command sent. manual(3).........manual-configuration command sent. If manual mode is used, snStackSecSwitchIpAddr and snStackSecSwitchSubnetMask must first be set before this command is sent. The return result of the above commands is either normal(0)...........normal condition, or invalid(1)..........invalid result.')
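# --- Illustrative sketch (not part of the generated MIB bindings) ---------
# The ordering the description above requires for a manual configuration
# command: slot ID, IP address and subnet mask before snStackSecSwitchCfgCmd
# is set to manual(3). Returning name/index/value triples is an assumption;
# how they are packed into SET PDUs is up to the manager application.
MANUAL_CFG = 3

def manual_cfg_varbinds(index, slot_id, ip_addr, subnet_mask):
    """Variable bindings, in the required order, for a manual config."""
    return [
        ('snStackSecSwitchSlotId', index, slot_id),
        ('snStackSecSwitchIpAddr', index, ip_addr),
        ('snStackSecSwitchSubnetMask', index, subnet_mask),
        ('snStackSecSwitchCfgCmd', index, MANUAL_CFG),
    ]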
snAgSysLogGbl = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1))
snAgSysLogGblEnable = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSysLogGblEnable.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblEnable.setDescription('Enable/disable system logging.')
snAgSysLogGblBufferSize = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 2), Integer32().clone(50)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSysLogGblBufferSize.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblBufferSize.setDescription('The number of dynamic system logging entries.')
snAgSysLogGblClear = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("normal", 0), ("clearAll", 1), ("clearDynamic", 2), ("clearStatic", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSysLogGblClear.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblClear.setDescription('Clear dynamic and/or static system logging buffers.')
snAgSysLogGblCriticalLevel = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 4), Integer32().clone(255)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSysLogGblCriticalLevel.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblCriticalLevel.setDescription('Filter events going to the logging buffer. This is a packed bit string object of which each bit has the following meaning: (bit 0 is the least significant bit). bit position meaning ------------ ------- 8-31 reserved 7 Warning (warning conditions) 6 Notification (normal but significant conditions) 5 Informational (informational messages) 4 Error (error conditions) 3 Emergency (system is unusable) 2 Debugging (debugging messages) 1 Critical (critical conditions) 0 Alert (immediate action needed) Setting a critical level bit to 1 makes the logging buffer accept the corresponding event. Resetting a critical level bit to 0 makes the logging buffer reject the corresponding event.')
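# --- Illustrative sketch (not part of the generated MIB bindings) ---------
# Decoding the snAgSysLogGblCriticalLevel bit mask into the set of event
# levels the logging buffer accepts, using the bit positions listed above.
CRITICAL_LEVEL_BITS = {
    0: 'alert', 1: 'critical', 2: 'debugging', 3: 'emergency',
    4: 'error', 5: 'informational', 6: 'notification', 7: 'warning',
}

def accepted_levels(mask):
    """Return the names of the levels whose bits are set in the mask."""
    return {name for bit, name in CRITICAL_LEVEL_BITS.items()
            if mask & (1 << bit)}

# Example: accepted_levels(255) returns all eight levels (the default).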
snAgSysLogGblLoggedCount = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogGblLoggedCount.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblLoggedCount.setDescription('The number of events logged in the system logging buffer.')
snAgSysLogGblDroppedCount = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogGblDroppedCount.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblDroppedCount.setDescription('The number of events dropped.')
snAgSysLogGblFlushedCount = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogGblFlushedCount.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblFlushedCount.setDescription('The number of times that the system logging buffer was cleared.')
snAgSysLogGblOverrunCount = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogGblOverrunCount.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblOverrunCount.setDescription('The number of times that the system logging buffer was wrapped around.')
snAgSysLogGblServer = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 9), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSysLogGblServer.setStatus('deprecated')
if mibBuilder.loadTexts: snAgSysLogGblServer.setDescription('IP address of syslog server.')
snAgSysLogGblFacility = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24))).clone(namedValues=NamedValues(("kern", 1), ("user", 2), ("mail", 3), ("daemon", 4), ("auth", 5), ("syslog", 6), ("lpr", 7), ("news", 8), ("uucp", 9), ("sys9", 10), ("sys10", 11), ("sys11", 12), ("sys12", 13), ("sys13", 14), ("sys14", 15), ("cron", 16), ("local0", 17), ("local1", 18), ("local2", 19), ("local3", 20), ("local4", 21), ("local5", 22), ("local6", 23), ("local7", 24))).clone('user')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSysLogGblFacility.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblFacility.setDescription('Facility code.')
snAgSysLogGblPersistenceEnable = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSysLogGblPersistenceEnable.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogGblPersistenceEnable.setDescription('Enable/disable system logging persistence.')
snAgSysLogBufferTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 2), )
if mibBuilder.loadTexts: snAgSysLogBufferTable.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogBufferTable.setDescription('Dynamic system logging buffer table.')
snAgSysLogBufferEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgSysLogBufferIndex"))
if mibBuilder.loadTexts: snAgSysLogBufferEntry.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogBufferEntry.setDescription('A row in the dynamic system logging buffer table.')
snAgSysLogBufferIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogBufferIndex.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogBufferIndex.setDescription('The index to the dynamic system logging buffer table.')
snAgSysLogBufferTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 2, 1, 2), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogBufferTimeStamp.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogBufferTimeStamp.setDescription('A time stamp when the event is logged.')
snAgSysLogBufferCriticalLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("other", 1), ("alert", 2), ("critical", 3), ("debugging", 4), ("emergency", 5), ("error", 6), ("informational", 7), ("notification", 8), ("warning", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogBufferCriticalLevel.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogBufferCriticalLevel.setDescription('The critical level of this event.')
snAgSysLogBufferMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 2, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogBufferMessage.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogBufferMessage.setDescription('The system logging message.')
snAgSysLogBufferCalTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 2, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogBufferCalTimeStamp.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogBufferCalTimeStamp.setDescription('A time stamp of when the event was logged. This object returns a NULL-terminated time stamp string if the system calendar time was set, and a zero-length string if it was not.')
snAgStaticSysLogBufferTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 3), )
if mibBuilder.loadTexts: snAgStaticSysLogBufferTable.setStatus('current')
if mibBuilder.loadTexts: snAgStaticSysLogBufferTable.setDescription('Static system logging buffer table.')
snAgStaticSysLogBufferEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 3, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgStaticSysLogBufferIndex"))
if mibBuilder.loadTexts: snAgStaticSysLogBufferEntry.setStatus('current')
if mibBuilder.loadTexts: snAgStaticSysLogBufferEntry.setDescription('A row in the static system logging buffer table.')
snAgStaticSysLogBufferIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgStaticSysLogBufferIndex.setStatus('current')
if mibBuilder.loadTexts: snAgStaticSysLogBufferIndex.setDescription('The index to the static system logging buffer table.')
snAgStaticSysLogBufferTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 3, 1, 2), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgStaticSysLogBufferTimeStamp.setStatus('current')
if mibBuilder.loadTexts: snAgStaticSysLogBufferTimeStamp.setDescription('A time stamp when the event is logged.')
snAgStaticSysLogBufferCriticalLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 3, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("other", 1), ("alert", 2), ("critical", 3), ("debugging", 4), ("emergency", 5), ("error", 6), ("informational", 7), ("notification", 8), ("warning", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgStaticSysLogBufferCriticalLevel.setStatus('current')
if mibBuilder.loadTexts: snAgStaticSysLogBufferCriticalLevel.setDescription('The critical level of this event.')
snAgStaticSysLogBufferMessage = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 3, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgStaticSysLogBufferMessage.setStatus('current')
if mibBuilder.loadTexts: snAgStaticSysLogBufferMessage.setDescription('The system logging message.')
snAgStaticSysLogBufferCalTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 3, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgStaticSysLogBufferCalTimeStamp.setStatus('current')
if mibBuilder.loadTexts: snAgStaticSysLogBufferCalTimeStamp.setDescription('A time stamp of when the event was logged. This object returns a NULL-terminated time stamp string if the system calendar time was set, and a zero-length string if it was not.')
snAgSysLogServerTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 4), )
if mibBuilder.loadTexts: snAgSysLogServerTable.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogServerTable.setDescription('System Log Server table.')
snAgSysLogServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 4, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgSysLogServerIP"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgSysLogServerUDPPort"))
if mibBuilder.loadTexts: snAgSysLogServerEntry.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogServerEntry.setDescription('A row in the SysLog Server table.')
snAgSysLogServerIP = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 4, 1, 1), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogServerIP.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogServerIP.setDescription('IP address of syslog server.')
snAgSysLogServerUDPPort = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSysLogServerUDPPort.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogServerUDPPort.setDescription('UDP port number of syslog server.')
snAgSysLogServerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 6, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("valid", 2), ("delete", 3), ("create", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgSysLogServerRowStatus.setStatus('current')
if mibBuilder.loadTexts: snAgSysLogServerRowStatus.setDescription("This object is used to create and delete rows in the table and control whether they are used. The values that can be written are: delete(3)...deletes the row; create(4)...creates a new row. If the row already exists, a SET with the value create(4) returns the error 'badValue'. Deleted rows go away immediately. The following values can be returned on reads: other(1)...some other case; valid(2)...the row exists and is valid.")
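# --- Illustrative sketch (not part of the generated MIB) ----------------------
# Creating a syslog server row via snAgSysLogServerRowStatus. The row index is
# the server IP address (four sub-identifiers) followed by the UDP port. The
# agent address 192.0.2.1, community 'private', and server 192.0.2.99:514 are
# assumptions for the example.
def _example_create_syslog_server_row():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              Integer32, setCmd)
    # snAgSysLogServerRowStatus.<ip>.<port>, set to create(4)
    row_status = '1.3.6.1.4.1.1991.1.1.2.6.4.1.3.192.0.2.99.514'
    errorIndication, errorStatus, errorIndex, varBinds = next(
        setCmd(SnmpEngine(), CommunityData('private'),
               UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
               ObjectType(ObjectIdentity(row_status), Integer32(4))))
    return errorIndication or errorStatus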
snAgentSysParaConfigTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 7, 1), )
if mibBuilder.loadTexts: snAgentSysParaConfigTable.setStatus('current')
if mibBuilder.loadTexts: snAgentSysParaConfigTable.setDescription('A table of agent system parameters for each board.')
snAgentSysParaConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 7, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentSysParaConfigIndex"))
if mibBuilder.loadTexts: snAgentSysParaConfigEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentSysParaConfigEntry.setDescription('A row in the Agent System Parameters Configuration table.')
snAgentSysParaConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 7, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentSysParaConfigIndex.setStatus('current')
if mibBuilder.loadTexts: snAgentSysParaConfigIndex.setDescription('The index to the Agent System Parameters Configuration Table.')
snAgentSysParaConfigDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 7, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentSysParaConfigDescription.setStatus('current')
if mibBuilder.loadTexts: snAgentSysParaConfigDescription.setDescription('The main board description string.')
snAgentSysParaConfigMin = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 7, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentSysParaConfigMin.setStatus('current')
if mibBuilder.loadTexts: snAgentSysParaConfigMin.setDescription('The minimum value of this Agent System Parameter.')
snAgentSysParaConfigMax = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 7, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentSysParaConfigMax.setStatus('current')
if mibBuilder.loadTexts: snAgentSysParaConfigMax.setDescription('The maximum value of this Agent System Parameter.')
snAgentSysParaConfigDefault = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 7, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentSysParaConfigDefault.setStatus('current')
if mibBuilder.loadTexts: snAgentSysParaConfigDefault.setDescription('The default value of this Agent System Parameter.')
snAgentSysParaConfigCurrent = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 7, 1, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentSysParaConfigCurrent.setStatus('current')
if mibBuilder.loadTexts: snAgentSysParaConfigCurrent.setDescription('The currently configured value of this Agent System Parameter.')
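# --- Illustrative sketch (not part of the generated MIB) ----------------------
# Reading the min/max/default/current columns of one snAgentSysParaConfig row
# in a single GET. The row index 1 and the agent address/community are
# assumptions for the example.
def _example_read_sys_para(index=1):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    base = '1.3.6.1.4.1.1991.1.1.2.7.1.1'
    columns = {3: 'min', 4: 'max', 5: 'default', 6: 'current'}
    errorIndication, errorStatus, errorIndex, varBinds = next(
        getCmd(SnmpEngine(), CommunityData('public'),
               UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
               *[ObjectType(ObjectIdentity('%s.%d.%d' % (base, col, index)))
                 for col in sorted(columns)]))
    return dict(zip([columns[c] for c in sorted(columns)],
                    [varBind[1] for varBind in varBinds]))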
snAgentConfigModuleTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1), )
if mibBuilder.loadTexts: snAgentConfigModuleTable.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleTable.setDescription('A table of each configured module information.')
snAgentConfigModuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentConfigModuleIndex"))
if mibBuilder.loadTexts: snAgentConfigModuleEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleEntry.setDescription('A row in the Agent Configured Module table.')
snAgentConfigModuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModuleIndex.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleIndex.setDescription('The index to the Agent Configured Module Table.')
snAgentConfigModuleType = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 103, 112, 113, 114, 144, 145, 152, 153, 154, 155, 160, 161, 168, 169, 176, 177, 180, 181, 184, 185, 192, 195, 196, 197, 198, 200, 201, 202, 206, 207, 208, 209, 212, 214, 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1093, 1094, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1105, 1106, 1107, 1108, 1109, 1110, 1111, 1112, 1113, 1114, 1115, 1116, 1117, 1118, 1119, 1120, 1121, 1122, 1123, 1124, 1125, 2064, 2065, 2066, 2067, 2068, 2069, 2074, 2080, 2081, 2083, 2096, 2098, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2112, 2113, 2208, 2209, 2220, 2240, 2241, 2244, 2245, 2246, 2248, 2249, 2016, 2017, 2020, 2021, 2024, 2137, 2138, 2139, 2032, 2033, 2036, 2037, 2040, 2140, 2141, 2142, 2055, 2056, 2057, 2136, 2135, 2134, 2132, 2133, 2224, 2225, 2227, 2228, 2229, 2233, 2234, 2235, 2236))).clone(namedValues=NamedValues(("bi8PortGigManagementModule", 0), ("bi4PortGigManagementModule", 1), ("bi16PortCopperManagementModule", 2), ("bi4PortGigModule", 3), ("fi2PortGigManagementModule", 4), ("fi4PortGigManagementModule", 5), ("bi8PortGigCopperManagementModule", 6), ("fi8PortGigManagementModule", 7), ("bi8PortGigModule", 8), ("bi12PortGigCopper2PortGigFiberManagement", 9), ("bi24PortCopperModule", 10), ("fi24PortCopperModule", 11), ("bi16Port100FXModule", 12), ("bi8Port100FXModule", 13), ("bi8PortGigCopperModule", 14), ("bi12PortGigCopper2PortGigFiber", 15), ("bi2PortGigManagementModule", 18), ("bi24Port100FXModule", 19), ("bi0PortManagementModule", 20), ("pos622MbsModule", 21), ("pos155MbsModule", 22), ("bi2PortGigModule", 23), ("bi2PortGigCopperModule", 24), ("fi2PortGigModule", 25), ("fi4PortGigModule", 26), ("fi8PortGigModule", 27), ("fi8PortGigCopperModule", 28), ("fi8PortGigCopperManagementModule", 29), ("pos155Mbs2PModule", 30), ("fi4PortGigCopperManagementModule", 31), ("fi2PortGigCopperManagementModule", 32), ("bi4PortGigCopperManagementModule", 33), ("bi2PortGigCopperManagementModule", 34), ("bi8PortGigM4ManagementModule", 35), ("bi4PortGigM4ManagementModule", 36), ("bi2PortGigM4ManagementModule", 37), ("bi0PortGigM4ManagementModule", 38), ("bi0PortWSMManagementModule", 39), ("biPos2Port2488MbsModule", 40), ("bi0PortWSMModule", 41), ("niPos2Port2488MbsModule", 42), ("ni4802", 43), ("bi4PortGigNPAModule", 44), ("biAtm2Port155MbsModule", 45), ("biAtm4Port155MbsModule", 46), ("bi1Port10GigModule", 47), ("fes4802Module", 48), ("fes2402Module", 49), ("fes9604Module", 50), ("fes12GigCopperAndGigFiberModule", 51), ("fesx24GigModule", 52), ("fesx24Gig2TenGigModule", 53), ("fesx24Gig1TenGigModule", 54), ("fesx48GigModule", 55), ("fesx48Gig2TenGigModule", 56), ("fesx48Gig1TenGigModule", 57), ("bi40PortGigCopperHVModule", 58), ("bi60PortGigCopperHVModule", 59), ("bi8Port10GigModule", 60), ("bi10PortGigHVModule", 61), ("bi20PortGigHVModule", 62), ("bi24PortGigModule", 63), ("bi24PortGigCopperModule", 64), ("bi48PortGigCopperModule", 65), 
("bi24PortGigFiberModule", 66), ("ni4Port10GigSPModule", 75), ("ni40PortGigSPModule", 76), ("ni40PortGigCopperSPModule", 77), ("ni2Port10GigSPModule", 78), ("ni10PortGigSPModule", 79), ("ni20PortGigSPModule", 80), ("xmr4Port10GigSPModule", 81), ("xmr20PortGigSPModule", 82), ("xmr2Port10GigSPModule", 83), ("xmr20PortGigCopperSPModule", 84), ("xmr20PortGigFXSPModule", 85), ("niImrMrManagementModule", 86), ("niXmrMrManagementModule", 87), ("xer4Port10GigSPModule", 88), ("xer2Port10GigSPModule", 89), ("xer20PortGigCopperSPModule", 90), ("xer20PortGigFXSPModule", 91), ("mlx4Port10GigSPModule", 92), ("mlx2Port10GigSPModule", 93), ("mlx20PortGigCopperSPModule", 94), ("mlx20PortGigFXSPModule", 95), ("mlx48PortGigMrj21SPModule", 103), ("fesx24GigFiberGigCopperModule", 112), ("fesx24GigFiber2TenGigModule", 113), ("fesx24GigFiber1TenGigModule", 114), ("fgs24PortManagementModule", 144), ("fgs48PortManagementModule", 145), ("fgsXfp2Port10gModule", 152), ("fgsCx42Port10gModule", 153), ("fgsXfp1Cx41Port10gModule", 154), ("fgsXpf1Port10gModule", 155), ("fls24PortCopperBaseModule", 160), ("fls48PortCopperBaseModule", 161), ("flsXfp1Port10gModule", 168), ("flsCx41Port10gModule", 169), ("fcx624SBaseModule", 176), ("fcx648SBaseModule", 177), ("fcx624SPoeBaseModule", 180), ("fcx648SPoeBaseModule", 181), ("fcxXfp2Port10gModule", 184), ("fcxCx42Port16gModule", 185), ("fcx624SFBaseModule", 192), ("biFiJc48ePort100fxIpcModule", 195), ("biFiJc48tPort100fxIpcModule", 196), ("biFiJc8PortGigM4ManagementModule", 197), ("biFiJc8PortGigIgcModule", 198), ("biFiJc16PortGigIgcModule", 200), ("biJc24PortCopperIpc4GigIgcModule", 201), ("biJc16PortGigCopperIgcModule", 202), ("biFiJc24Port100fxIpcModule", 206), ("bi2Port10GigModule", 207), ("biJc48tPortRJ21OmpModule", 208), ("biJc48ePortRJ45OmpModule", 209), ("biJc24PortIpcRJ45PoeModule", 212), ("biJc2PortGigIgcM4ManagementModule", 214), ("fdryBi4Port10GigModule", 1048), ("fdryBi40PortGigModule", 1049), ("fdryBi1Port100FXManagementModule", 1050), ("fdryBi2Port10GigModule", 1051), ("fdryBi40PortGigCopperModule", 1052), ("fdryBi60PortGigCopperModule", 1053), ("fdryBi4Port10GigHVModule", 1054), ("fdryBi2Port10GigHVModule", 1055), ("fdryBi8Port10GigHVModule", 1056), ("fdryBi40PortGigHVModule", 1057), ("fdryBi40PortGigCopperHVModule", 1058), ("fdryBi60PortGigCopperHVModule", 1059), ("fdryBi8Port10GigModule", 1060), ("fdryBi10PortGigHVModule", 1061), ("fdryBi20PortGigHVModule", 1062), ("fdryBi24PortGigModule", 1063), ("fdryBi24PortGigCopperModule", 1064), ("fdryBi48PortGigCopperModule", 1065), ("fdryBi24PortGigFiberModule", 1066), ("fdryBi16Port10GigModule", 1067), ("fdryNi4Port10GigSPModule", 1075), ("fdryNi40PortGigSPModule", 1076), ("fdryNi40PortGigCopperSPModule", 1077), ("fdryNi2Port10GigSPModule", 1078), ("fdryNi10PortGigSPModule", 1079), ("fdryNi20PortGigSPModule", 1080), ("fdryXmr4Port10GigSPModule", 1081), ("fdryXmr20PortGigSPModule", 1082), ("fdryXmr2Port10GigSPModule", 1083), ("fdryXmr20PortGigCopperSPModule", 1084), ("fdryXmr20PortGigFXSPModule", 1085), ("fdryNiImrMrManagementModule", 1086), ("fdryNiXmrMrManagementModule", 1087), ("fdryMlx4Port10GigSPModule", 1088), ("fdryMlx2Port10GigSPModule", 1089), ("fdryMlx20PortGigCopperSPModule", 1090), ("fdryMlx20PortGigFXSPModule", 1091), ("brMlx4Port10GigXModule", 1093), ("brMlx24PortGigCopperXModule", 1094), ("brMlx24PortGigSfpXModule", 1095), ("niCes24PortFiberModule", 1096), ("niCes24PortCopperModule", 1097), ("niCes2Port10GigModule", 1098), ("niCes48PortFiberModule", 1099), ("niCes48PortCopperModule", 1100), 
("niCes48PortFiberWith2Port10GModule", 1101), ("niCes48PortCopperWith2Port10GModule", 1102), ("fdryMlx48PortGigMrj21SPModule", 1103), ("fdryXmr2PortOC192SPModule", 1104), ("fdryXmr1PortOC192SPModule", 1105), ("fdryXmr8PortOC48SPModule", 1106), ("fdryXmr4PortOC48SPModule", 1107), ("fdryXmr2PortOC48SPModule", 1108), ("fdryNiMlxMrManagementModule", 1109), ("niMlx8Port10GigMModule", 1110), ("niMlx8Port10GigDModule", 1111), ("brMlx8Port10GigXModule", 1112), ("brMlx2Port100GigXModule", 1113), ("brcdMlxMr2ManagementModule", 1114), ("brcdXmrMr2ManagementModule", 1115), ("brcdMlx32Mr2ManagementModule", 1116), ("brcdXmr32Mr2ManagementModule", 1117), ("brcdNiXmr32MrManagementModule", 1118), ("brcdNiMlx32MrManagementModule", 1119), ("brcdMlx24Port10GigDMModule", 1120), ("brMlx4Port40GigMModule", 1121), ("brcdNiCes4Port10GigModule", 1122), ("brMlx2Port100GigCFP2Module", 1123), ("brMlx20Port10GigModule", 1124), ("brMlx4Port10GigXIPSecModule", 1125), ("fdryFiV4Sx12ComboPortManagementModule", 2064), ("fdryFiV4Sx2Port10gModule", 2065), ("fdryFiV4Sx24PortGigCopperModule", 2066), ("fdryFiV4Sx24PortGigFiberModule", 2067), ("fdryFiV4Sx2Port10gLanWanModule", 2068), ("fdryFiV4Sx24Port100m1gFiberModule", 2069), ("fdryFiV4Sx12ComboPortManagement2Module", 2074), ("fdryFiV4Sx210gPortManagementModule", 2080), ("fdryFiSx0PortManagementModule", 2081), ("fdryFiV4Sx4g4fPortManagementModule", 2083), ("fdryFiV6Sx12ComboPortManagementModule", 2096), ("fdryFiV6Sx24PortGigCopperModule", 2098), ("fdryFiV6Sx2Port10gModule", 2100), ("fdryFiV6Sx24Port100m1gFiberModule", 2101), ("fdryFiV6Sx210gPortManagementModule", 2102), ("fdryFiV6Sx48PortGigCopperPoeModule", 2103), ("fdryFiV6Sx4g4fPortManagementModule", 2104), ("fdryFiV6Sx12ComboPortManagement2Module", 2105), ("fdryFiV6Sx48PortGigCopperModule", 2106), ("fdryFiV6SxXl0PortManagementModule", 2112), ("fdryFiV6SxXl210gPortManagementModule", 2113), ("fdryFcx624BaseModule", 2208), ("fdryFcx648BaseModule", 2209), ("fdryFcxSfpPlus4Port10gModule", 2220), ("fdryIcx6610624BaseModule", 2240), ("fdryIcx6610648BaseModule", 2241), ("fdryIcx6610624PoeBaseModule", 2244), ("fdryIcx6610648PoeBaseModule", 2245), ("fdryIcx6610624FBaseModule", 2246), ("fdryIcx6610DualMode8PortModule", 2248), ("fdryIcx6610Qsfp10Port160gModule", 2249), ("fdryIcx6430624BaseModule", 2016), ("fdryIcx6430648BaseModule", 2017), ("fdryIcx6430624PoeBaseModule", 2020), ("fdryIcx6430648PoeBaseModule", 2021), ("fdryIcx6430sfp4Port4gModule", 2024), ("fdryIcx6430612CBaseModule", 2137), ("fdryIcx6430Copper2Port2gModule", 2138), ("fdryIcx6430sfp2Port2gModule", 2139), ("fdryIcx6450624BaseModule", 2032), ("fdryIcx6450648BaseModule", 2033), ("fdryIcx6450624PoeBaseModule", 2036), ("fdryIcx6450648PoeBaseModule", 2037), ("fdryIcx6450sfp4Port40gModule", 2040), ("fdryIcx6450612CPDBaseModule", 2140), ("fdryIcx6450Copper2Port2gModule", 2141), ("fdryIcx6450sfp2Port2gModule", 2142), ("fdryIcx665056BaseModule", 2055), ("fdryIcx6650sfp4Port40gModule", 2056), ("fdryIcx6650sfp8Port10gModule", 2057), ("fdryIcx775048CBaseModule", 2136), ("fdryIcx775048FBaseModule", 2135), ("fdryIcx775026QBaseModule", 2134), ("fdryIcx7750QSFP6port40gModule", 2132), ("fdryIcx77506Q6port40gModule", 2133), ("fdryIcx7450624BaseModule", 2224), ("fdryIcx7450648BaseModule", 2225), ("fdryIcx7450648FBaseModule", 2227), ("fdryIcx7450624PoeBaseModule", 2228), ("fdryIcx7450648PoeBaseModule", 2229), ("fdryIcx7400sfpplus4Port40gModule", 2233), ("fdryIcx7400copper4Port40gModule", 2234), ("fdryIcx7400sfp4Port4gModule", 2235), ("fdryIcx7400qsfpplus1Port40gModule", 
2236)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentConfigModuleType.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleType.setDescription('The Configured Module Type.')
snAgentConfigModuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("valid", 2), ("delete", 3), ("create", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentConfigModuleRowStatus.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleRowStatus.setDescription('To create or delete a configured module table entry.')
snAgentConfigModuleDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModuleDescription.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleDescription.setDescription('A description of the configured module.')
snAgentConfigModuleOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModuleOperStatus.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleOperStatus.setDescription('Module operational status. A zero-length string indicates that the physical module has not been inserted into the chassis.')
snAgentConfigModuleSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModuleSerialNumber.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleSerialNumber.setDescription('Module serial number. A zero-length string indicates that the module serial number EEPROM has not been programmed or that the module does not support a serial number EEPROM.')
snAgentConfigModuleNumberOfPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModuleNumberOfPorts.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleNumberOfPorts.setDescription('The number of ports on the module.')
snAgentConfigModuleMgmtModuleType = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18))).clone(namedValues=NamedValues(("other", 1), ("nonManagementModule", 2), ("unknownManagementModule", 3), ("m1ManagementModule", 4), ("m2ManagementModule", 5), ("m3ManagementModule", 6), ("m4ManagementModule", 7), ("m5ManagementModule", 8), ("jetcoreStackManagementModule", 9), ("muchoManagementModule", 10), ("rottWeilerManagementModule", 11), ("fesXStackManagementModule", 12), ("fgsStackManagementModule", 13), ("niCesManagementModule", 14), ("fastIronSuperXManagementModule", 15), ("fastIronSXRManagementModule", 16), ("fastIronV6SuperXManagementModule", 17), ("fastIronV6SXRManagementModule", 18)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModuleMgmtModuleType.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleMgmtModuleType.setDescription('Management module type.')
snAgentConfigModuleNumberOfCpus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModuleNumberOfCpus.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModuleNumberOfCpus.setDescription('The number of module CPUs.')
snAgentConfigModule2Table = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2), )
if mibBuilder.loadTexts: snAgentConfigModule2Table.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2Table.setDescription('A table of each configured stacking module information.')
snAgentConfigModule2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentConfigModule2Unit"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentConfigModule2Slot"))
if mibBuilder.loadTexts: snAgentConfigModule2Entry.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2Entry.setDescription('A row in the Agent Configured Stacking Module table.')
snAgentConfigModule2Unit = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModule2Unit.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2Unit.setDescription('The unit index to the Agent Configured Stacking Module Table.')
snAgentConfigModule2Slot = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModule2Slot.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2Slot.setDescription('The slot index to the Agent Configured Stacking Module Table.')
snAgentConfigModule2Type = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 64, 65, 66, 67, 68, 69, 74, 80, 81, 112, 113, 114, 144, 145, 152, 153, 154, 155, 160, 161, 168, 169, 176, 177, 180, 181, 184, 185, 192, 195, 196, 197, 198, 200, 201, 202, 206, 207, 208, 209, 212, 214, 2208, 2209, 2220, 2240, 2241, 2244, 2245, 2246, 2248, 2249, 2016, 2017, 2020, 2021, 2024, 2137, 2138, 2139, 2032, 2033, 2036, 2037, 2040, 2140, 2141, 2142, 2055, 2056, 2057, 2136, 2135, 2134, 2132, 2133, 2224, 2225, 2227, 2228, 2229, 2233, 2234, 2235, 2236))).clone(namedValues=NamedValues(("bi8PortGigManagementModule", 0), ("bi4PortGigManagementModule", 1), ("bi16PortCopperManagementModule", 2), ("bi4PortGigModule", 3), ("fi2PortGigManagementModule", 4), ("fi4PortGigManagementModule", 5), ("bi8PortGigCopperManagementModule", 6), ("fi8PortGigManagementModule", 7), ("bi8PortGigModule", 8), ("bi12PortGigCopper2PortGigFiberManagement", 9), ("bi24PortCopperModule", 10), ("fi24PortCopperModule", 11), ("bi16Port100FXModule", 12), ("bi8Port100FXModule", 13), ("bi8PortGigCopperModule", 14), ("bi12PortGigCopper2PortGigFiber", 15), ("bi2PortGigManagementModule", 18), ("bi24Port100FXModule", 19), ("bi0PortManagementModule", 20), ("pos622MbsModule", 21), ("pos155MbsModule", 22), ("bi2PortGigModule", 23), ("bi2PortGigCopperModule", 24), ("fi2PortGigModule", 25), ("fi4PortGigModule", 26), ("fi8PortGigModule", 27), ("fi8PortGigCopperModule", 28), ("fi8PortGigCopperManagementModule", 29), ("pos155Mbs2PModule", 30), ("fi4PortGigCopperManagementModule", 31), ("fi2PortGigCopperManagementModule", 32), ("bi4PortGigCopperManagementModule", 33), ("bi2PortGigCopperManagementModule", 34), ("bi8PortGigM4ManagementModule", 35), ("bi4PortGigM4ManagementModule", 36), ("bi2PortGigM4ManagementModule", 37), ("bi0PortGigM4ManagementModule", 38), ("bi0PortWSMManagementModule", 39), ("biPos2Port2488MbsModule", 40), ("bi0PortWSMModule", 41), ("niPos2Port2488MbsModule", 42), ("ni4802", 43), ("bi4PortGigNPAModule", 44), ("biAtm2Port155MbsModule", 45), ("biAtm4Port155MbsModule", 46), ("bi1Port10GigModule", 47), ("fes4802Module", 48), ("fes2402Module", 49), ("fes9604Module", 50), ("fes12GigCopperAndGigFiberModule", 51), ("fesx24GigModule", 52), ("fesx24Gig2TenGigModule", 53), ("fesx24Gig1TenGigModule", 54), ("fesx48GigModule", 55), ("fesx48Gig2TenGigModule", 56), ("fesx48Gig1TenGigModule", 57), ("superx12ComboPortManagementModule", 64), ("superx2PortTenGigModule", 65), ("superx24PortGigCopperModule", 66), ("superx24PortGigFiberModule", 67), ("superx2PortTenGigLanWanModule", 68), ("superx24Port100tx1PortGigFiberModule", 69), ("superx12ComboPortManagement2Module", 74), ("superxR2PortTenGigManagementModule", 80), ("superxRManagementModule", 81), ("fesx24GigFiberGigCopperModule", 112), ("fesx24GigFiber2TenGigModule", 113), ("fesx24GigFiber1TenGigModule", 114), ("fgs24PortManagementModule", 144), ("fgs48PortManagementModule", 145), ("fgsXfp2Port10gModule", 152), ("fgsCx42Port10gModule", 153), ("fgsXfp1Cx41Port10gModule", 154), ("fgsXpf1Port10gModule", 155), ("fls24PortCopperBaseModule", 160), ("fls48PortCopperBaseModule", 161), ("flsXfp1Port10gModule", 168), ("flsCx41Port10gModule", 169), ("fcx624SBaseModule", 176), ("fcx648SBaseModule", 177), ("fcx624SPoeBaseModule", 
180), ("fcx648SPoeBaseModule", 181), ("fcxXfp2Port10gModule", 184), ("fcxCx42Port16gModule", 185), ("fcx624SFBaseModule", 192), ("biFiJc48ePort100fxIpcModule", 195), ("biFiJc48tPort100fxIpcModule", 196), ("biFiJc8PortGigM4ManagementModule", 197), ("biFiJc8PortGigIgcModule", 198), ("biFiJc16PortGigIgcModule", 200), ("biJc24PortCopperIpc4GigIgcModule", 201), ("biJc16PortGigCopperIgcModule", 202), ("biFiJc24Port100fxIpcModule", 206), ("bi2Port10GigModule", 207), ("biJc48tPortRJ21OmpModule", 208), ("biJc48ePortRJ45OmpModule", 209), ("biJc24PortIpcRJ45PoeModule", 212), ("biJc2PortGigIgcM4ManagementModule", 214), ("fdryFcx624BaseModule", 2208), ("fdryFcx648BaseModule", 2209), ("fdryFcxSfpPlus4Port10gModule", 2220), ("fdryIcx6610624BaseModule", 2240), ("fdryIcx6610648BaseModule", 2241), ("fdryIcx6610624PoeBaseModule", 2244), ("fdryIcx6610648PoeBaseModule", 2245), ("fdryIcx6610624FBaseModule", 2246), ("fdryIcx6610DualMode8PortModule", 2248), ("fdryIcx6610Qsfp10Port160gModule", 2249), ("fdryIcx6430624BaseModule", 2016), ("fdryIcx6430648BaseModule", 2017), ("fdryIcx6430624PoeBaseModule", 2020), ("fdryIcx6430648PoeBaseModule", 2021), ("fdryIcx6430sfp4Port4gModule", 2024), ("fdryIcx6430612CBaseModule", 2137), ("fdryIcx6430Copper2Port2gModule", 2138), ("fdryIcx6430sfp2Port2gModule", 2139), ("fdryIcx6450624BaseModule", 2032), ("fdryIcx6450648BaseModule", 2033), ("fdryIcx6450624PoeBaseModule", 2036), ("fdryIcx6450648PoeBaseModule", 2037), ("fdryIcx6450sfp4Port40gModule", 2040), ("fdryIcx6450612CPDBaseModule", 2140), ("fdryIcx6450Copper2Port2gModule", 2141), ("fdryIcx6450sfp2Port2gModule", 2142), ("fdryIcx665056BaseModule", 2055), ("fdryIcx6650sfp4Port40gModule", 2056), ("fdryIcx6650sfp8Port10gModule", 2057), ("fdryIcx775048CBaseModule", 2136), ("fdryIcx775048FBaseModule", 2135), ("fdryIcx775026QBaseModule", 2134), ("fdryIcx7750QSFP6port40gModule", 2132), ("fdryIcx77506Q6port40gModule", 2133), ("fdryIcx7450624BaseModule", 2224), ("fdryIcx7450648BaseModule", 2225), ("fdryIcx7450648FBaseModule", 2227), ("fdryIcx7450624PoeBaseModule", 2228), ("fdryIcx7450648PoeBaseModule", 2229), ("fdryIcx7400sfpplus4Port40gModule", 2233), ("fdryIcx7400copper4Port40gModule", 2234), ("fdryIcx7400sfp4Port4gModule", 2235), ("fdryIcx7400qsfpplus1Port40gModule", 2236)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentConfigModule2Type.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2Type.setDescription('The Configured Module Type.')
snAgentConfigModule2RowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("valid", 2), ("delete", 3), ("create", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentConfigModule2RowStatus.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2RowStatus.setDescription('To create or delete a configured module table entry.')
snAgentConfigModule2Description = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModule2Description.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2Description.setDescription('A description of the configured module.')
snAgentConfigModule2OperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModule2OperStatus.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2OperStatus.setDescription('Module operational status. A zero-length string indicates that the physical module has not been inserted into the chassis.')
snAgentConfigModule2SerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModule2SerialNumber.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2SerialNumber.setDescription('Module serial number. A zero-length string indicates that the module serial number EEPROM has not been programmed or that the module does not support a serial number EEPROM.')
snAgentConfigModule2NumberOfPorts = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModule2NumberOfPorts.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2NumberOfPorts.setDescription('The number of ports on the module.')
snAgentConfigModule2MgmtModuleType = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))).clone(namedValues=NamedValues(("other", 1), ("nonManagementModule", 2), ("unknownManagementModule", 3), ("m1ManagementModule", 4), ("m2ManagementModule", 5), ("m3ManagementModule", 6), ("m4ManagementModule", 7), ("m5ManagementModule", 8), ("jetcoreStackManagementModule", 9), ("muchoManagementModule", 10), ("rottWeilerManagementModule", 11), ("fesXStackManagementModule", 12), ("fgsStackManagementModule", 13)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModule2MgmtModuleType.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2MgmtModuleType.setDescription('Management module type.')
snAgentConfigModule2NumberOfCpus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 8, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentConfigModule2NumberOfCpus.setStatus('current')
if mibBuilder.loadTexts: snAgentConfigModule2NumberOfCpus.setDescription('The number of module CPUs.')
snAgentUserGbl = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9, 1))
snAgentUserMaxAccnt = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentUserMaxAccnt.setStatus('current')
if mibBuilder.loadTexts: snAgentUserMaxAccnt.setDescription('The maximum number of user account entries that can be configured.')
snAgentUserAccntTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9, 2), )
if mibBuilder.loadTexts: snAgentUserAccntTable.setStatus('current')
if mibBuilder.loadTexts: snAgentUserAccntTable.setDescription('A table of user account information.')
snAgentUserAccntEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentUserAccntName"))
if mibBuilder.loadTexts: snAgentUserAccntEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentUserAccntEntry.setDescription('A row in the Agent User table.')
snAgentUserAccntName = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 48))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentUserAccntName.setStatus('current')
if mibBuilder.loadTexts: snAgentUserAccntName.setDescription('The user name.')
snAgentUserAccntPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 48))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentUserAccntPassword.setStatus('current')
if mibBuilder.loadTexts: snAgentUserAccntPassword.setDescription('The user password.')
snAgentUserAccntEncryptCode = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9, 2, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentUserAccntEncryptCode.setStatus('current')
if mibBuilder.loadTexts: snAgentUserAccntEncryptCode.setDescription('The password encryption method code.')
snAgentUserAccntPrivilege = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentUserAccntPrivilege.setStatus('current')
if mibBuilder.loadTexts: snAgentUserAccntPrivilege.setDescription('The user privilege.')
snAgentUserAccntRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 9, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("valid", 2), ("delete", 3), ("create", 4), ("modify", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentUserAccntRowStatus.setStatus('current')
if mibBuilder.loadTexts: snAgentUserAccntRowStatus.setDescription('To create or delete a user account table entry.')
snAgentRedunGbl = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 10, 1))
snAgentRedunActiveMgmtMod = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 10, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentRedunActiveMgmtMod.setStatus('current')
if mibBuilder.loadTexts: snAgentRedunActiveMgmtMod.setDescription('Slot number of the active management module. Setting this object does not take effect immediately; saving the configuration data to flash storage and rebooting the system are required for the change to take effect. Setting a value of 0 requests the system to auto-select an active management module after power up.')
snAgentRedunSyncConfig = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 10, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentRedunSyncConfig.setStatus('current')
if mibBuilder.loadTexts: snAgentRedunSyncConfig.setDescription('The frequency, in seconds, at which the backup management module copies the configuration data from the active management module. Setting a value of 0 disables the synchronization copy. Setting a negative value initiates the synchronization copy once, immediately.')
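# --- Illustrative sketch (not part of the generated MIB) ----------------------
# Triggering an immediate one-shot configuration synchronization by writing a
# negative value to snAgentRedunSyncConfig, per the DESCRIPTION above. The
# agent address and community are assumptions for the example.
def _example_trigger_config_sync():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              Integer32, setCmd)
    errorIndication, errorStatus, errorIndex, varBinds = next(
        setCmd(SnmpEngine(), CommunityData('private'),
               UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
               ObjectType(ObjectIdentity('1.3.6.1.4.1.1991.1.1.2.10.1.2.0'),
                          Integer32(-1))))  # negative value: sync once now
    return errorIndication or errorStatus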
snAgentRedunBkupCopyBootCode = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('disabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentRedunBkupCopyBootCode.setStatus('current')
if mibBuilder.loadTexts: snAgentRedunBkupCopyBootCode.setDescription("If enabled(1), the backup management module copies the boot code from the active management module to its boot code flash storage after power up, and whenever the active management module's boot code is updated. The backup management module does not copy if both boot codes are identical.")
snAgentEnableMgmtModRedunStateChangeTrap = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disabled", 0), ("enabled", 1))).clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentEnableMgmtModRedunStateChangeTrap.setStatus('current')
if mibBuilder.loadTexts: snAgentEnableMgmtModRedunStateChangeTrap.setDescription('Indicates whether the SNMP agent process is permitted to generate management module redundancy state change traps.')
snAgentRedunBkupBootLoad = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 10, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 17, 20))).clone(namedValues=NamedValues(("normal", 1), ("operationError", 17), ("downloadBackup", 20)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentRedunBkupBootLoad.setStatus('current')
if mibBuilder.loadTexts: snAgentRedunBkupBootLoad.setDescription('Action object to download a new boot code from the boot flash storage of the active management module to the backup management module. Values returned by a get operation: normal(1)...no operation; operationError(17)...error codes. Values accepted by a set operation: downloadBackup(20)...download the boot code from the active management module to the backup management module. Any set operation is rejected during loading until an error or normal state is reached.')
snAgentRedunSwitchOver = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("other", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentRedunSwitchOver.setStatus('current')
if mibBuilder.loadTexts: snAgentRedunSwitchOver.setDescription('To force a switch-over from standby to active state if the backup management module exists in the chassis.')
snAgentCpuUtilTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 1), )
if mibBuilder.loadTexts: snAgentCpuUtilTable.setStatus('current')
if mibBuilder.loadTexts: snAgentCpuUtilTable.setDescription('Table to list utilization for all CPUs in the device.')
snAgentCpuUtilEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentCpuUtilSlotNum"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentCpuUtilCpuId"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentCpuUtilInterval"))
if mibBuilder.loadTexts: snAgentCpuUtilEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentCpuUtilEntry.setDescription('A row in the CPU utilization table.')
snAgentCpuUtilSlotNum = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentCpuUtilSlotNum.setStatus('current')
if mibBuilder.loadTexts: snAgentCpuUtilSlotNum.setDescription('The slot number of the module that contains the CPU.')
snAgentCpuUtilCpuId = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentCpuUtilCpuId.setStatus('current')
if mibBuilder.loadTexts: snAgentCpuUtilCpuId.setDescription("The ID of the CPU. For a non-VM1/WSM management module, there is one CPU. For VM1/WSM there is one management CPU and three slave CPUs; the management CPU can be turned off. For POS and ATM there is no management CPU but two slave CPUs. The ID for the management CPU is 1. Values of 2 or greater denote slave CPUs.")
snAgentCpuUtilInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentCpuUtilInterval.setStatus('current')
if mibBuilder.loadTexts: snAgentCpuUtilInterval.setDescription('The interval, in seconds, over which this utilization is computed. For both management and slave CPUs, utilization is reported over 1 sec, 5 sec, 60 sec and 300 sec intervals.')
snAgentCpuUtilValue = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentCpuUtilValue.setStatus('deprecated')
if mibBuilder.loadTexts: snAgentCpuUtilValue.setDescription('The statistical CPU utilization in units of one-hundredth of a percent. This value is deprecated; use snAgentCpuUtilPercent or snAgentCpuUtil100thPercent instead.')
snAgentCpuUtilPercent = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentCpuUtilPercent.setStatus('current')
if mibBuilder.loadTexts: snAgentCpuUtilPercent.setDescription('The statistical CPU utilization in units of a percent.')
snAgentCpuUtil100thPercent = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 1, 1, 6), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentCpuUtil100thPercent.setStatus('current')
if mibBuilder.loadTexts: snAgentCpuUtil100thPercent.setDescription('The statistical CPU utilization in units of one-hundredth of a percent.')
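# --- Illustrative sketch (not part of the generated MIB) ----------------------
# Walking the snAgentCpuUtilPercent column and decoding the
# (slot, cpu-id, interval) index carried in each instance OID. The agent
# address and community are assumptions for the example.
def _example_walk_cpu_util():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    col = (1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 1, 1, 5)  # snAgentCpuUtilPercent
    for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(
            SnmpEngine(), CommunityData('public'),
            UdpTransportTarget(('192.0.2.1', 161)), ContextData(),
            ObjectType(ObjectIdentity(col)), lexicographicMode=False):
        if errorIndication or errorStatus:
            break
        for oid, value in varBinds:
            # instance suffix is the three index sub-identifiers
            slot, cpu_id, interval = tuple(oid.getOid())[len(col):]
            print('slot %s cpu %s over %ss: %s%%' % (slot, cpu_id, interval, value))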
snCpuProcessTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 2), )
if mibBuilder.loadTexts: snCpuProcessTable.setStatus('current')
if mibBuilder.loadTexts: snCpuProcessTable.setDescription('Table to list utilization and runtime for all CPU processes in the device. For NetIron devices, the snAgentCpuProcessEnable object needs to be enabled.')
snCpuProcessEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snCpuProcessName"))
if mibBuilder.loadTexts: snCpuProcessEntry.setStatus('current')
if mibBuilder.loadTexts: snCpuProcessEntry.setDescription('A row in the CPU process table.')
snCpuProcessName = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 48))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCpuProcessName.setStatus('current')
if mibBuilder.loadTexts: snCpuProcessName.setDescription('The process name.')
snCpuProcess5SecUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 2, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCpuProcess5SecUtil.setStatus('current')
if mibBuilder.loadTexts: snCpuProcess5SecUtil.setDescription('The process utilization collected over the last 5 seconds, in units of one-hundredth of a percent (100*n). It can have the value 0 or a value between 100 and 10000 in multiples of 100. If the agent is queried immediately after CPU usage collection is turned on and 5 seconds have not yet elapsed, data for the full interval is not available; for NetIron devices, data for the actual elapsed time is returned instead.')
snCpuProcess1MinUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 2, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCpuProcess1MinUtil.setStatus('current')
if mibBuilder.loadTexts: snCpuProcess1MinUtil.setDescription('The process utilization collected over the last 1 minute, in units of one-hundredth of a percent (100*n). It can have the value 0 or a value between 100 and 10000 in multiples of 100. If the agent is queried immediately after CPU usage collection is turned on and 1 minute has not yet elapsed, data for the full interval is not available; for NetIron devices, data for the actual elapsed time is returned instead.')
snCpuProcess5MinUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 2, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCpuProcess5MinUtil.setStatus('current')
if mibBuilder.loadTexts: snCpuProcess5MinUtil.setDescription('The process utilization collected over the last 5 minutes, in units of one-hundredth of a percent (100*n). It can have the value 0 or a value between 100 and 10000 in multiples of 100. If the agent is queried immediately after CPU usage collection is turned on and 5 minutes have not yet elapsed, data for the full interval is not available; for NetIron devices, data for the actual elapsed time is returned instead.')
snCpuProcess15MinUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCpuProcess15MinUtil.setStatus('current')
if mibBuilder.loadTexts: snCpuProcess15MinUtil.setDescription('The process utilization collected over the last 15 minutes, in units of one-hundredth of a percent (100*n). It can have the value 0 or a value between 100 and 10000 in multiples of 100. This object is not supported and has been excluded from the agent for NetIron devices.')
snCpuProcessRuntime = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCpuProcessRuntime.setStatus('current')
if mibBuilder.loadTexts: snCpuProcessRuntime.setDescription('Process runtime in milliseconds.')
snAgentCpuProcessEnable = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 11, 3), EnabledStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentCpuProcessEnable.setStatus('current')
if mibBuilder.loadTexts: snAgentCpuProcessEnable.setDescription('Enables the CPU utilization statistics collection.')
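# --- Illustrative sketch (not part of the generated MIB) ----------------------
# Enabling process statistics collection (required on NetIron, per the
# descriptions above) and then walking the per-process 5-second utilization
# column, converting hundredths of a percent to percent. EnabledStatus
# enabled(1) and the agent address/communities are assumptions for the example.
def _example_process_utilization():
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              Integer32, setCmd, nextCmd)
    engine, target, ctx = (SnmpEngine(),
                           UdpTransportTarget(('192.0.2.1', 161)), ContextData())
    # snAgentCpuProcessEnable.0 = enabled(1)
    next(setCmd(engine, CommunityData('private'), target, ctx,
                ObjectType(ObjectIdentity('1.3.6.1.4.1.1991.1.1.2.11.3.0'),
                           Integer32(1))))
    # snCpuProcess5SecUtil, indexed by process name
    for (ei, es, _, varBinds) in nextCmd(
            engine, CommunityData('public'), target, ctx,
            ObjectType(ObjectIdentity('1.3.6.1.4.1.1991.1.1.2.11.2.1.2')),
            lexicographicMode=False):
        if ei or es:
            break
        for oid, value in varBinds:
            print('%s = %.2f%%' % (oid.prettyPrint(), int(value) / 100.0))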
snAgentHwICBMCounterTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1), )
if mibBuilder.loadTexts: snAgentHwICBMCounterTable.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterTable.setDescription('Table to list the ICBM counter values. This table is not supported on the 10G module.')
snAgentHwICBMCounterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentHwICBMCounterSlot"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentHwICBMCounterDMA"))
if mibBuilder.loadTexts: snAgentHwICBMCounterEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterEntry.setDescription('A row representing ICBM counter values for that slot.')
snAgentHwICBMCounterSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentHwICBMCounterSlot.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterSlot.setDescription('Slot number where ICBM resides.')
snAgentHwICBMCounterDMA = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentHwICBMCounterDMA.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterDMA.setDescription('DMA Id within a slot where the ICBM resides. Valid only for Jetcore modules; for non-Jetcore modules this index is ignored by the agent and a value of 0 is returned.')
snAgentHwICBMCounterFreeDepth = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1, 1, 3), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentHwICBMCounterFreeDepth.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterFreeDepth.setDescription('Current depth of the free queue for this ICBM.')
snAgentHwICBMCounterWriteDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentHwICBMCounterWriteDrop.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterWriteDrop.setDescription('Write sequencer drop count for this ICBM.')
snAgentHwICBMCounterWriteInput = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentHwICBMCounterWriteInput.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterWriteInput.setDescription('Write sequencer input counter for this ICBM.')
snAgentHwICBMCounterWriteOutput = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentHwICBMCounterWriteOutput.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterWriteOutput.setDescription('Write sequencer output counter for this ICBM.')
snAgentHwICBMCounterReadInput = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentHwICBMCounterReadInput.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterReadInput.setDescription('Read sequencer input counter for this ICBM.')
snAgentHwICBMCounterReadOutput = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentHwICBMCounterReadOutput.setStatus('current')
if mibBuilder.loadTexts: snAgentHwICBMCounterReadOutput.setDescription('Read sequencer output counter for this ICBM.')
snCAMIpStatTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 2), )
if mibBuilder.loadTexts: snCAMIpStatTable.setStatus('current')
if mibBuilder.loadTexts: snCAMIpStatTable.setDescription('Table to list the IP CAM statistics. ')
snCAMIpStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snCAMIpStatIfIndex"), (0, "FOUNDRY-SN-AGENT-MIB", "snCAMIpStatLevel"))
if mibBuilder.loadTexts: snCAMIpStatEntry.setStatus('current')
if mibBuilder.loadTexts: snCAMIpStatEntry.setDescription('A row representing IP CAM statistics for a given interface and level.')
snCAMIpStatIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 2, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCAMIpStatIfIndex.setStatus('current')
if mibBuilder.loadTexts: snCAMIpStatIfIndex.setDescription('ifIndex value of the local interface.')
snCAMIpStatLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 2, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCAMIpStatLevel.setStatus('current')
if mibBuilder.loadTexts: snCAMIpStatLevel.setDescription('Level of CAM entry for that interface.')
snCAMIpStatFreeEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 2, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCAMIpStatFreeEntries.setStatus('current')
if mibBuilder.loadTexts: snCAMIpStatFreeEntries.setDescription('Free entries in the IP CAM for that interface and level.')
snCAMIpStatTotalEntries = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 2, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCAMIpStatTotalEntries.setStatus('current')
if mibBuilder.loadTexts: snCAMIpStatTotalEntries.setDescription('Total entries in the IP CAM for that interface and level.')
snCAMStatTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3), )
if mibBuilder.loadTexts: snCAMStatTable.setStatus('current')
if mibBuilder.loadTexts: snCAMStatTable.setDescription('Table to list the CAM statistics. ')
snCAMStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snCamStatDMAIdNumber"))
if mibBuilder.loadTexts: snCAMStatEntry.setStatus('current')
if mibBuilder.loadTexts: snCAMStatEntry.setDescription('A row representing CAM statistics for a given DMA Id number.')
snCamStatDMAIdNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 1), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatDMAIdNumber.setStatus('current')
if mibBuilder.loadTexts: snCamStatDMAIdNumber.setDescription('DMA Id number.')
snCamStatDMAMasterNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatDMAMasterNumber.setStatus('current')
if mibBuilder.loadTexts: snCamStatDMAMasterNumber.setDescription('DMA Master for that DMA Id.')
snCamStatFreePool0Entries = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatFreePool0Entries.setStatus('current')
if mibBuilder.loadTexts: snCamStatFreePool0Entries.setDescription('CAM free pool0 entries.')
snCamStatFreePool1Entries = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatFreePool1Entries.setStatus('current')
if mibBuilder.loadTexts: snCamStatFreePool1Entries.setDescription('CAM free pool1 entries.')
snCamStatFreePool2Entries = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatFreePool2Entries.setStatus('current')
if mibBuilder.loadTexts: snCamStatFreePool2Entries.setDescription('CAM free pool2 entries.')
snCamStatFreePool3Entries = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatFreePool3Entries.setStatus('current')
if mibBuilder.loadTexts: snCamStatFreePool3Entries.setDescription('CAM free pool3 entries.')
snCamStatFreeL2Entries = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatFreeL2Entries.setStatus('current')
if mibBuilder.loadTexts: snCamStatFreeL2Entries.setDescription('CAM Free L2 entries.')
snCamStatFreeL2LowestSection = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatFreeL2LowestSection.setStatus('current')
if mibBuilder.loadTexts: snCamStatFreeL2LowestSection.setDescription('CAM Free L2 lowest section entries.')
snCamStatHostLookupCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatHostLookupCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatHostLookupCount.setDescription('CAM host lookup count for router.')
snCamStatRouteLookupCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatRouteLookupCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatRouteLookupCount.setDescription('CAM route lookup count for router.')
snCamStatLevel1 = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatLevel1.setStatus('current')
if mibBuilder.loadTexts: snCamStatLevel1.setDescription('CAM stat level1 entries for router.')
snCamStatLevel2 = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatLevel2.setStatus('current')
if mibBuilder.loadTexts: snCamStatLevel2.setDescription('CAM stat level2 entries for router.')
snCamStatLevel3 = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatLevel3.setStatus('current')
if mibBuilder.loadTexts: snCamStatLevel3.setDescription('CAM stat level3 entries for router.')
snCamStatMacFailCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatMacFailCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatMacFailCount.setDescription('CAM MAC fail count.')
snCamStatIPRouteFailCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatIPRouteFailCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatIPRouteFailCount.setDescription('CAM IP route fail count.')
snCamStatIPSessionFailCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatIPSessionFailCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatIPSessionFailCount.setDescription('CAM IP session fail count.')
snCamStatIPMCastFailCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatIPMCastFailCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatIPMCastFailCount.setDescription('CAM IP multicast fail count.')
snCamStatL2SessionFailCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatL2SessionFailCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatL2SessionFailCount.setDescription('CAM L2 session fail count.')
snCamStatAddMACCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatAddMACCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatAddMACCount.setDescription('CAM add MAC count.')
snCamStatAddVLANCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatAddVLANCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatAddVLANCount.setDescription('CAM add VLAN count.')
snCamStatAddIPHostCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatAddIPHostCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatAddIPHostCount.setDescription('CAM add IP host count.')
snCamStatAddIPRouteCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatAddIPRouteCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatAddIPRouteCount.setDescription('CAM add IP route count.')
snCamStatAddIPSessionCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatAddIPSessionCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatAddIPSessionCount.setDescription('CAM add IP session count.')
snCamStatAddIPMCastCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatAddIPMCastCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatAddIPMCastCount.setDescription('CAM add IP multicast count.')
snCamStatAddL2SessionCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatAddL2SessionCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatAddL2SessionCount.setDescription('CAM add L2 session count.')
snCamStatAddIPXCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatAddIPXCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatAddIPXCount.setDescription('CAM add IPX count.')
snCamStatDeleteDMACamCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 3, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snCamStatDeleteDMACamCount.setStatus('current')
if mibBuilder.loadTexts: snCamStatDeleteDMACamCount.setDescription('CAM delete DMA CAM count.')
snAgSystemDRAM = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 4))
snAgSystemDRAMUtil = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 4, 1), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDRAMUtil.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDRAMUtil.setDescription('The system dynamic memory utilization, in unit of percentage.')
snAgSystemDRAMTotal = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 4, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDRAMTotal.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDRAMTotal.setDescription('The total amount of system dynamic memory, in number of bytes.')
snAgSystemDRAMFree = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 4, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDRAMFree.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDRAMFree.setDescription('The free amount of system dynamic memory, in number of bytes.')
snAgSystemDRAMForBGP = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 4, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDRAMForBGP.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDRAMForBGP.setDescription('The free amount of system dynamic memory used by BGP, in number of bytes.')
snAgSystemDRAMForOSPF = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 4, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDRAMForOSPF.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDRAMForOSPF.setDescription('The free amount of system dynamic memory used by OSPF, in number of bytes.')
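# Example (not part of the generated MIB): the DRAM gauges above relate as
# utilization = (total - free) / total. A minimal sketch, assuming the total
# and free byte counts were already fetched from snAgSystemDRAMTotal and
# snAgSystemDRAMFree; it simply derives the percentage that
# snAgSystemDRAMUtil reports.
def _example_dram_util_percent(total_bytes, free_bytes):
    if total_bytes <= 0:
        return 0.0  # avoid division by zero on an empty/unsupported reading
    return 100.0 * (total_bytes - free_bytes) / total_bytes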
snAgSystemDebug = MibIdentifier((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5))
snAgSystemDebugTotalIn = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugTotalIn.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugTotalIn.setDescription('Total incoming packet count. Sum of Buffer Manager and CPU read count.')
snAgSystemDebugTotalOut = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugTotalOut.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugTotalOut.setDescription('Total outgoing packet count.')
snAgSystemDebugCpuQueueRead = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugCpuQueueRead.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugCpuQueueRead.setDescription('CPU Queue read count.')
snAgSystemDebugDRAMBuffer = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugDRAMBuffer.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugDRAMBuffer.setDescription('DRAM buffer count.')
snAgSystemDebugBMBuffer = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugBMBuffer.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugBMBuffer.setDescription('BM buffer count.')
snAgSystemDebugBMFreeBuffer = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugBMFreeBuffer.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugBMFreeBuffer.setDescription('BM free buffer count.')
snAgSystemDebugBMFreeBufferMgmt = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugBMFreeBufferMgmt.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugBMFreeBufferMgmt.setDescription('BM free buffer management count.')
snAgSystemDebugIpcGigLock = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugIpcGigLock.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugIpcGigLock.setDescription('IPC GIG lock count.')
snAgSystemDebugDRAMGetError = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugDRAMGetError.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugDRAMGetError.setDescription('DRAM get error count.')
snAgSystemDebugDRAMToBMCopyFail = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 12, 5, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgSystemDebugDRAMToBMCopyFail.setStatus('current')
if mibBuilder.loadTexts: snAgSystemDebugDRAMToBMCopyFail.setDescription('DRAM to Buffer Manager copy fail count.')
snAgentTempTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 1), )
if mibBuilder.loadTexts: snAgentTempTable.setStatus('current')
if mibBuilder.loadTexts: snAgentTempTable.setDescription('Table to list temperatures of all the modules in the device. This table is applicable to only those modules with temperature sensors.')
snAgentTempEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentTempSlotNum"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentTempSensorId"))
if mibBuilder.loadTexts: snAgentTempEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentTempEntry.setDescription('A row in the module temperature table.')
snAgentTempSlotNum = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: snAgentTempSlotNum.setStatus('current')
if mibBuilder.loadTexts: snAgentTempSlotNum.setDescription('The slot number of module which contains the temperature sensor represented by this row.')
snAgentTempSensorId = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 1, 1, 2), Integer32())
if mibBuilder.loadTexts: snAgentTempSensorId.setStatus('current')
if mibBuilder.loadTexts: snAgentTempSensorId.setDescription('The identifier of the temperature sensor on the slave module whose temperature is represented by this row. For the management module: sensor #1 - intake side temperature; sensor #2 - exhaust side temperature.')
snAgentTempSensorDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTempSensorDescr.setStatus('current')
if mibBuilder.loadTexts: snAgentTempSensorDescr.setDescription('Describes the temperature sensor in a human-readable form. This corresponds to snAgentTempSensorId, which is numeric and used to traverse the temperature sensor table. The description provides the meaning and purpose of this sensor.')
snAgentTempValue = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-110, 250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTempValue.setStatus('current')
if mibBuilder.loadTexts: snAgentTempValue.setDescription('Temperature of the sensor represented by this row. Each unit is 0.5 degrees Celsius.')
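# Example (not part of the generated MIB): snAgentTempValue is reported in
# units of 0.5 degrees Celsius, so the raw integer must be halved. A minimal
# sketch of fetching and converting one reading; host, community and the
# (slot, sensor) index values are placeholders.
def _example_read_temperature(host='192.0.2.1', community='public', slot=1, sensor=1):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, getCmd)
    errInd, errStat, errIdx, varBinds = next(getCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'snAgentTempValue',
                                  slot, sensor))))
    if errInd or errStat:
        return None  # lookup failed; nothing to convert
    return int(varBinds[0][1]) * 0.5  # raw half-degree units -> degrees Celsius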
snAgentTempThresholdTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 2), )
if mibBuilder.loadTexts: snAgentTempThresholdTable.setStatus('current')
if mibBuilder.loadTexts: snAgentTempThresholdTable.setDescription('Table to list temperature threshold levels for the 4 fan speed settings. Depending on the temperature level, the fans run at different RPM. There are 4 levels of temperature settings for the 4 fan speeds (low, medium, medium-high, high). This table is applicable only to those modules with temperature sensors. Each row carries 2 temperature threshold values: when the high value is reached, the fan runs at the next higher speed, and when the temperature drops below the low value, the fan runs at the next lower speed.')
snAgentTempThresholdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentTempThresholdModule"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentTempThresholdLevel"))
if mibBuilder.loadTexts: snAgentTempThresholdEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentTempThresholdEntry.setDescription('A row in the module temperature threshold table.')
snAgentTempThresholdModule = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("mgmtModule", 1), ("slaveModule", 2), ("switchFabricModule", 3))))
if mibBuilder.loadTexts: snAgentTempThresholdModule.setStatus('current')
if mibBuilder.loadTexts: snAgentTempThresholdModule.setDescription('The module in the system for which threshold levels represented by this row are applicable.')
snAgentTempThresholdLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("low", 1), ("medium", 2), ("mediumHhigh", 3), ("high", 4))))
if mibBuilder.loadTexts: snAgentTempThresholdLevel.setStatus('current')
if mibBuilder.loadTexts: snAgentTempThresholdLevel.setDescription('The temperature threshold level of the module for which threshold levels represented by this row are applicable.')
snAgentTempThresholdHighValue = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 2, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-110, 250))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentTempThresholdHighValue.setStatus('current')
if mibBuilder.loadTexts: snAgentTempThresholdHighValue.setDescription("The high value for the temperature threshold, above which the fans would need to operate at the next higher speed. If the temperature exceeds the high threshold value of the 'high' level, the module will be shut down.")
snAgentTempThresholdLowValue = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 2, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-110, 250))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: snAgentTempThresholdLowValue.setStatus('current')
if mibBuilder.loadTexts: snAgentTempThresholdLowValue.setDescription("The low value for the temperature threshold, below which the fans would need to operate at the next lower speed. This value is not applicable for the 'low' level, as there is no lower speed than that.")
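# Example (not part of the generated MIB): the threshold columns above are
# read-write, so a manager may adjust them with an SNMP SET. A minimal
# sketch; the credentials and the (module, level) index values are
# placeholders, and the value is in the same 0.5-degree units as
# snAgentTempValue.
def _example_set_high_threshold(host='192.0.2.1', community='private',
                                module=1, level=4, value=110):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              Integer32, setCmd)
    errInd, errStat, errIdx, varBinds = next(setCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB',
                                  'snAgentTempThresholdHighValue',
                                  module, level), Integer32(value))))
    return errInd is None and int(errStat) == 0  # True when the SET succeeded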
snAgentTemp2Table = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 3), )
if mibBuilder.loadTexts: snAgentTemp2Table.setStatus('current')
if mibBuilder.loadTexts: snAgentTemp2Table.setDescription('Table to list temperatures of the modules in the device for each unit. This table is applicable to only those modules with temperature sensors.')
snAgentTemp2Entry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 3, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentTemp2UnitNum"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentTemp2SlotNum"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentTemp2SensorId"))
if mibBuilder.loadTexts: snAgentTemp2Entry.setStatus('current')
if mibBuilder.loadTexts: snAgentTemp2Entry.setDescription('A row in the module temperature table.')
snAgentTemp2UnitNum = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 3, 1, 1), Integer32())
if mibBuilder.loadTexts: snAgentTemp2UnitNum.setStatus('current')
if mibBuilder.loadTexts: snAgentTemp2UnitNum.setDescription('The unit number of module which contains the temperature sensor represented by this row.')
snAgentTemp2SlotNum = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 3, 1, 2), Integer32())
if mibBuilder.loadTexts: snAgentTemp2SlotNum.setStatus('current')
if mibBuilder.loadTexts: snAgentTemp2SlotNum.setDescription('The slot number of module which contains the temperature sensor represented by this row.')
snAgentTemp2SensorId = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 3, 1, 3), Integer32())
if mibBuilder.loadTexts: snAgentTemp2SensorId.setStatus('current')
if mibBuilder.loadTexts: snAgentTemp2SensorId.setDescription('The identifier of the temperature sensor on the slave module whose temperature is represented by this row. For the FastIron management module: sensor #1 - intake side temperature; sensor #2 - exhaust side temperature.')
snAgentTemp2SensorDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 3, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTemp2SensorDescr.setStatus('current')
if mibBuilder.loadTexts: snAgentTemp2SensorDescr.setDescription('Describes the temperature sensor in a human-readable form. This corresponds to snAgentTemp2SensorId, which is numeric and used to traverse the temperature sensor table. The description provides the meaning and purpose of this sensor.')
snAgentTemp2Value = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 13, 3, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-110, 250))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTemp2Value.setStatus('current')
if mibBuilder.loadTexts: snAgentTemp2Value.setDescription('Temperature of the sensor represented by this row. Each unit is 0.5 degrees Celsius.')
fdryLicenseTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1), )
if mibBuilder.loadTexts: fdryLicenseTable.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseTable.setDescription('A list of licenses maintained by license sub-system.')
fdryLicenseEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "fdryLicensePackageName"), (0, "FOUNDRY-SN-AGENT-MIB", "fdryLicenseLid"), (1, "FOUNDRY-SN-AGENT-MIB", "fdryLicenseHash"))
if mibBuilder.loadTexts: fdryLicenseEntry.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseEntry.setDescription('An entry in a license table.')
fdryLicensePackageName = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 24)))
if mibBuilder.loadTexts: fdryLicensePackageName.setStatus('current')
if mibBuilder.loadTexts: fdryLicensePackageName.setDescription('Name of the package whose license information this entry displays.')
fdryLicenseLid = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 11)))
if mibBuilder.loadTexts: fdryLicenseLid.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseLid.setDescription('For FastIron: License ID (LID) of the license from the package whose information this entry displays. For a node-locked license, this LID is the same as the LID of the device; for a non-node-locked license, this LID is set to 2.0. For NetIron: License ID (LID) of the chassis or the line module for which this entry displays license information.')
fdryLicenseHash = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 17)))
if mibBuilder.loadTexts: fdryLicenseHash.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseHash.setDescription('A unique hash for identifying a license entry in the system. This helps traverse through the entries with same package name and LID.')
fdryLicenseType = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("trial", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseType.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseType.setDescription('The type of the license, which can be either normal or trial.')
fdryLicensePrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicensePrecedence.setStatus('current')
if mibBuilder.loadTexts: fdryLicensePrecedence.setDescription('Defines the priority of a particular trial license among those having the same package and LID. This is primarily used for determining which license to use, when there are many trial and normal licenses with same package name and LID.')
fdryLicenseTrialDays = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseTrialDays.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseTrialDays.setDescription('The number of trial days for the license, if it is a trial license. The value has no meaning for normal licenses and reads as 0 on a Get operation.')
fdryLicenseTrialTimeElapsed = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseTrialTimeElapsed.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseTrialTimeElapsed.setDescription('The number of trial hours elapsed for the license, if it is a trial license. The value has no meaning for normal licenses and reads as 0 on a Get operation.')
fdryLicenseTrialTimeLeft = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseTrialTimeLeft.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseTrialTimeLeft.setDescription('The number of hours left for the trial license. This is derived from the total number of hours and the cumulative number of hours used. For a normal license, this is 0.')
fdryLicenseTrialState = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("invalid", 1), ("unused", 2), ("active", 3), ("expired", 4), ("duplicated", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseTrialState.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseTrialState.setDescription("This indicates the state of the trial license. Invalid means the license is not valid for this box, unused means the license has never been used, active means it has been used at least once, and expired means it has expired and cannot be used any more. Duplicated means the license has the same package name as another license on the same device but a different serial number; this applies only to non-node-locked licenses. Value duplicated(5) is supported only on FastIron.")
fdryLicenseVendorInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseVendorInfo.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseVendorInfo.setDescription('This is Brocade-specific package data, encoded as an octet string. It contains encoded license-specific information such as the package bit mask, number of ports, etc.')
fdryLicenseSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseSlot.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseSlot.setDescription('This indicates the slot number of the module the license belongs to. There is a one-to-one mapping between LID and slot number, as each module has a unique LID and can be present only in one slot.')
fdryLicenseMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("nodeLocked", 1), ("nonNodeLocked", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseMode.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseMode.setDescription('The mode of the license, which can be either node-locked or non-node-locked. This object is supported only on FastIron.')
fdryLicenseSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseSerialNumber.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseSerialNumber.setDescription('This is the serial number of the license. This applies only to non-node-locked licenses. This object is supported only on FastIron.')
fdryLicenseCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 1, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicenseCapacity.setStatus('current')
if mibBuilder.loadTexts: fdryLicenseCapacity.setDescription('The capacity of the license. For a POD license, this is the number of ports. For a premium or advance license, it is 1. This object is supported only on FastIron.')
fdryLicensedFeatureInfo = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 2), Bits().clone(namedValues=NamedValues(("ospf", 0), ("isis", 1), ("bgp", 2), ("mpls", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fdryLicensedFeatureInfo.setStatus('current')
if mibBuilder.loadTexts: fdryLicensedFeatureInfo.setDescription('The features or packages enabled/licensed on the system.')
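# Example (not part of the generated MIB): fdryLicensedFeatureInfo is a BITS
# construct, and in the standard SNMP BITS encoding bit 0 is the most
# significant bit of the first octet. A minimal sketch that decodes the raw
# octets into the feature names defined above.
def _example_decode_licensed_features(octets):
    data = bytearray(octets)  # accepts bytes or a str of octets
    names = ((0, 'ospf'), (1, 'isis'), (2, 'bgp'), (3, 'mpls'))
    enabled = []
    for bit, name in names:
        byte, offset = divmod(bit, 8)
        if byte < len(data) and data[byte] & (0x80 >> offset):
            enabled.append(name)
    return enabled  # e.g. ['ospf', 'bgp'] for b'\xa0'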
brcdPortLicenseTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 3), )
if mibBuilder.loadTexts: brcdPortLicenseTable.setStatus('current')
if mibBuilder.loadTexts: brcdPortLicenseTable.setDescription('A list of ports which require the port license.')
brcdPortLicenseEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 3, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "brcdPortLicenseIndex"))
if mibBuilder.loadTexts: brcdPortLicenseEntry.setStatus('current')
if mibBuilder.loadTexts: brcdPortLicenseEntry.setDescription('An entry in a licensed port table.')
brcdPortLicenseIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 3, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: brcdPortLicenseIndex.setStatus('current')
if mibBuilder.loadTexts: brcdPortLicenseIndex.setDescription('The port/interface index (ifindex).')
brcdPortLicenseStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 15, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("validLic", 1), ("noLic", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: brcdPortLicenseStatus.setStatus('current')
if mibBuilder.loadTexts: brcdPortLicenseStatus.setDescription('The current license state of the port. validLic(1) - the port requires a port license and has a valid license. noLic(2) - the port requires a port license and does not have a valid license.')
brcdSwPackageFname = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: brcdSwPackageFname.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageFname.setDescription('Name of the release package file or manifest file (including path) currently associated with the system. When the object is not used, the value is a zero length string.')
brcdSwPackageLoad = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("none", 1), ("tftpToPrimary", 2), ("tftpToSecondary", 3), ("tftpToMgmtModulePrimaryIntfModuleSecondary", 4), ("tftpToMgmtModuleSecondaryIntfModulePrimary", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: brcdSwPackageLoad.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageLoad.setDescription('Action object to upgrade the system using a release package. none(1) is the value after the system comes up and should not be used in a set request. The following values will download the release package from a TFTP server and upgrade the system: tftpToPrimary(2) installs both MP and LP application images to primary code. tftpToSecondary(3) installs both MP and LP application images to secondary code. tftpToMgmtModulePrimaryIntfModuleSecondary(4) installs the MP application image to primary code and the LP application images to secondary code. tftpToMgmtModuleSecondaryIntfModulePrimary(5) installs the MP application image to secondary code and the LP application images to primary code.')
brcdSwPackageLoadStatus = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25))).clone(namedValues=NamedValues(("normal", 1), ("started", 2), ("internalError", 3), ("manifestFileDownloadError", 4), ("manifestFileValidationError", 5), ("downloadingManagementModuleBoot", 6), ("downloadingManagementModuleMonitor", 7), ("downloadingManagementModuleApplication", 8), ("downloadingInterfaceModuleBoot", 9), ("downloadingInterfaceModuleMonitor", 10), ("downloadingInterfaceModuleApplication", 11), ("downloadingInterfaceModuleFpga", 12), ("downloadingFpgaMBridge", 13), ("downloadingFpgaSBridge", 14), ("downloadingFpgaHBridge", 15), ("upgradingManagementModuleBoot", 16), ("upgradingManagementModuleMonitor", 17), ("upgradingManagementModuleApplication", 18), ("upgradingInterfaceModuleBoot", 19), ("upgradingInterfaceModuleMonitor", 20), ("upgradingInterfaceModuleApplication", 21), ("upgradingInterfaceModuleFpga", 22), ("upgradingFpgaMBridge", 23), ("upgradingFpgaSBridge", 24), ("upgradingFpgaHBridge", 25)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: brcdSwPackageLoadStatus.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageLoadStatus.setDescription('This object indicates the progress of the upgrade operation. <TBD: more description> ')
brcdSwPackageUpgradeAllImages = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 4), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: brcdSwPackageUpgradeAllImages.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageUpgradeAllImages.setDescription('To specify an all-image upgrade. If set to true(1), the upgrade sequence will include the MP FPGA images (MBRIDGE/MBRIDGE32 and SBRIDGE/HSBRIDGE). The default is false(2), which upgrades only the MP and LP monitor images, MP and LP application images, and the LP bundled FPGA for MLX/XMR, while CES/CER will upgrade the monitor, application, and FPGA images. This object must be set along with brcdSwPackageLoad. For a read operation, this will always return false(2).')
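# Example (not part of the generated MIB): an upgrade is driven by setting
# brcdSwPackageFname to the manifest path and brcdSwPackageLoad to the wanted
# action in a single SET PDU, then polling brcdSwPackageLoadStatus for
# progress. A minimal sketch; host, community and the manifest path are
# placeholders.
def _example_start_package_upgrade(host='192.0.2.1', community='private',
                                   manifest='MLX/manifest.txt'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity,
                              OctetString, Integer32, setCmd)
    errInd, errStat, errIdx, varBinds = next(setCmd(
        SnmpEngine(), CommunityData(community),
        UdpTransportTarget((host, 161)), ContextData(),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'brcdSwPackageFname', 0),
                   OctetString(manifest)),
        ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'brcdSwPackageLoad', 0),
                   Integer32(2))))  # tftpToPrimary(2)
    return errInd is None and int(errStat) == 0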
brcdSwPackageUpgradeResultTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 5), )
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultTable.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultTable.setDescription('A table containing the summary of the upgrade operation.')
brcdSwPackageUpgradeResultEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 5, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "brcdSwPackageUpgradeResultIndex"))
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultEntry.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultEntry.setDescription('<TBD>')
brcdSwPackageUpgradeResultIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 5, 1, 1), Unsigned32())
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultIndex.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultIndex.setDescription('The sequential index, or upgrade step.')
brcdSwPackageUpgradeResultImageType = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 5, 1, 2), BrcdImageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultImageType.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultImageType.setDescription('The associated image type for this step of upgrade process.')
brcdSwPackageUpgradeResultStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 5, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("ok", 1), ("downloadFailed", 2), ("installFailed", 3), ("skipped", 4), ("unknown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultStatus.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultStatus.setDescription('The upgrade status for this particular image upgrade.')
brcdSwPackageUpgradeResultTimeStamp = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 5, 1, 4), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultTimeStamp.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultTimeStamp.setDescription('The timestamp when this upgrade step was performed.')
brcdSwPackageUpgradeResultDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 5, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultDescription.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageUpgradeResultDescription.setDescription("Summary description for this particular image upgrade. This is empty when brcdSwPackageUpgradeResultStatus is 'ok'.")
brcdSwPackageUpgradeSkipVersionCheck = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: brcdSwPackageUpgradeSkipVersionCheck.setStatus('current')
if mibBuilder.loadTexts: brcdSwPackageUpgradeSkipVersionCheck.setDescription("To skip the version comparison of the FPGA images. By default it performs the version comparison between the image version in the manifest file and the one installed in the system. Setting to 'true' forces the system to upgrade the images by skipping the version check. This object must be set along with brcdSwPackageLoad. For a read operation, this will always return false(2).")
brcdSwIntfModAutoUpgradeMode = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 2, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("unknown", 1), ("disabled", 2), ("tftp", 3), ("slot1", 4), ("slot2", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeMode.setStatus('current')
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeMode.setDescription('Specifies the mode of LP Auto-upgrade. <TBD: more description> ')
brcdSwIntfModAutoUpgradeTftpAddrType = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 2, 2), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeTftpAddrType.setStatus('current')
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeTftpAddrType.setDescription('TFTP server IP address Type. Supported address types are ipv4(1) and ipv6(2). <TBD: more description>')
brcdSwIntfModAutoUpgradeTftpAddr = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 2, 3), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeTftpAddr.setStatus('current')
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeTftpAddr.setDescription('TFTP server IP address. <TBD: more description>')
brcdSwIntfModAutoUpgradeSrcPath = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 2, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 128))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeSrcPath.setStatus('current')
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeSrcPath.setDescription("Specifies the path to the topmost directory of the release package relative to the root directory. If the source is a TFTP server, the root is the TFTP root. If the source is slot1 or slot2, the root is the top-level directory '/'.")
brcdSwIntfModAutoUpgradeAllImages = MibScalar((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 16, 1, 2, 5), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeAllImages.setStatus('deprecated')
if mibBuilder.loadTexts: brcdSwIntfModAutoUpgradeAllImages.setDescription("Used to specify an all-image upgrade. If set to true(1), the upgrade sequence will include the LP BOOT image. The default is false(2), which will upgrade only the LP FPGA images. This OID is deprecated: SET operations have no effect, and READ operations will always return 'false'.")
snAgentTaskCpuTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 1), )
if mibBuilder.loadTexts: snAgentTaskCpuTable.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskCpuTable.setDescription('Table to display the task ID, task name, CPU state, wait time, hold time and activity of each task in the device.')
snAgentTaskCpuEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 1, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentTaskCpuTaskID"))
if mibBuilder.loadTexts: snAgentTaskCpuEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskCpuEntry.setDescription('An entry containing the task ID, task name, CPU state, wait time, hold time and activity of a task in the device.')
snAgentTaskCpuTaskID = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskCpuTaskID.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskCpuTaskID.setDescription('Represents the Task Identification number')
snAgentTaskCpuTaskName = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 1, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskCpuTaskName.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskCpuTaskName.setDescription('Represents the Task name')
snAgentTaskCpuState = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 1, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 8))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskCpuState.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskCpuState.setDescription('Represents the current state of the task')
snAgentTaskCpuWaitTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 1, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskCpuWaitTime.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskCpuWaitTime.setDescription('A count used to represent the wait time in milliseconds')
snAgentTaskCpuHoldTime = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 1, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskCpuHoldTime.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskCpuHoldTime.setDescription('A count used to represent the hold time in milliseconds')
snAgentTaskCpuTaskActivity = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 1, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 1))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskCpuTaskActivity.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskCpuTaskActivity.setDescription('Represents the task activity: A - was running since the last show, I - idle.')
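# Example (not part of the generated MIB): a minimal sketch that walks the
# task CPU table and prints the name, state and hold time of each task.
# Host and community are placeholders; symbolic lookup again assumes this
# module is on the pysnmp MIB search path.
def _example_walk_task_cpu(host='192.0.2.1', community='public'):
    from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                              ContextData, ObjectType, ObjectIdentity, nextCmd)
    for (errInd, errStat, errIdx, varBinds) in nextCmd(
            SnmpEngine(), CommunityData(community),
            UdpTransportTarget((host, 161)), ContextData(),
            ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'snAgentTaskCpuTaskName')),
            ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'snAgentTaskCpuState')),
            ObjectType(ObjectIdentity('FOUNDRY-SN-AGENT-MIB', 'snAgentTaskCpuHoldTime')),
            lexicographicMode=False):
        if errInd or errStat:
            break
        name, state, hold = [vb[1] for vb in varBinds]
        print('%s %s %d ms' % (name.prettyPrint(), state.prettyPrint(), int(hold)))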
snAgentTaskMQTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2), )
if mibBuilder.loadTexts: snAgentTaskMQTable.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQTable.setDescription('Table to display the message queue priority, length, depth, message counts and the failed count for each task and corresponding MQ priority in the device.')
snAgentTaskMQEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentTaskMQTaskID"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentTaskMQPriority"))
if mibBuilder.loadTexts: snAgentTaskMQEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQEntry.setDescription('An entry containing the message queue priority, length, depth, message counts and the failed count for a task and corresponding MQ priority in the device.')
snAgentTaskMQTaskID = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQTaskID.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQTaskID.setDescription('Represents the Task Identification number')
snAgentTaskMQPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQPriority.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQPriority.setDescription('Represents the Priority of the Message Queue')
snAgentTaskMQTaskName = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQTaskName.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQTaskName.setDescription('Represents the Task name')
snAgentTaskMQLength = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQLength.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQLength.setDescription('Represents the size of the Message Queue')
snAgentTaskMQDepth = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 5), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQDepth.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQDepth.setDescription('A count used to represent the message queue depth')
snAgentTaskMQMaxDepth = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQMaxDepth.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQMaxDepth.setDescription('A count used to represent the maximum depth ever reached (clear-on-read counter).')
snAgentTaskMQStickyMaxDepth = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQStickyMaxDepth.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQStickyMaxDepth.setDescription('A count used to represent the maximum depth ever reached (this counter is not cleared on read).')
snAgentTaskMQMsgs = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 8), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQMsgs.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQMsgs.setDescription('A count used to represent the number of messages')
snAgentTaskMQMaxMsgs = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQMaxMsgs.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQMaxMsgs.setDescription('A count used to represent the maximum number of messages ever reached (clear-on-read counter).')
snAgentTaskMQStickyMaxMsgs = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQStickyMaxMsgs.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQStickyMaxMsgs.setDescription('A count used to represent the maximum number of messages ever reached (this counter is not cleared on read).')
snAgentTaskMQFailedCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 11), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQFailedCount.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQFailedCount.setDescription('A count used to represent the failed count (clear-on-read counter).')
snAgentTaskMQStickyFailedCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskMQStickyFailedCount.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskMQStickyFailedCount.setDescription('A count used to represent the failed count (this counter is not cleared on read).')
snAgentTaskBufferTable = MibTable((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 3), )
if mibBuilder.loadTexts: snAgentTaskBufferTable.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskBufferTable.setDescription('Table to display the task ID, task name, pool ID and the buffer count of each task in the device.')
snAgentTaskBufferEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 3, 1), ).setIndexNames((0, "FOUNDRY-SN-AGENT-MIB", "snAgentTaskBufferTaskID"), (0, "FOUNDRY-SN-AGENT-MIB", "snAgentTaskBufferPoolID"))
if mibBuilder.loadTexts: snAgentTaskBufferEntry.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskBufferEntry.setDescription('An entry containing the task ID, task name, pool ID and the buffer count of a task in the device.')
snAgentTaskBufferTaskID = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 3, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskBufferTaskID.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskBufferTaskID.setDescription('Represents the Task Identification number')
snAgentTaskBufferPoolID = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskBufferPoolID.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskBufferPoolID.setDescription('Represents the Pool Identification number')
snAgentTaskBufferTaskName = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 3, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskBufferTaskName.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskBufferTaskName.setDescription('Represents the Task name')
snAgentTaskBufferCount = MibTableColumn((1, 3, 6, 1, 4, 1, 1991, 1, 1, 2, 17, 3, 1, 4), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: snAgentTaskBufferCount.setStatus('current')
if mibBuilder.loadTexts: snAgentTaskBufferCount.setDescription('A count used to represent the number of buffers allocated to a task')
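# Example (not part of the generated MIB): the exportSymbols() calls below
# are what make these objects importable through pysnmp's MibBuilder. A
# minimal sketch of loading this compiled module programmatically, assuming
# a pysnmp 4.3+ MibBuilder; the directory path is a placeholder.
def _example_load_this_mib(mib_dir='/path/to/compiled/mibs'):
    from pysnmp.smi import builder
    mb = builder.MibBuilder()
    mb.addMibSources(builder.DirMibSource(mib_dir))
    mb.loadModules('FOUNDRY-SN-AGENT-MIB')
    return mb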
mibBuilder.exportSymbols("FOUNDRY-SN-AGENT-MIB", snChasFanStatus=snChasFanStatus, snAgTftpServerAddrType=snAgTftpServerAddrType, snStackSecSwitchEnabled=snStackSecSwitchEnabled, snAgentConfigModule2Type=snAgentConfigModule2Type, snAgentConfigModuleNumberOfPorts=snAgentConfigModuleNumberOfPorts, snAgentConfigModuleMgmtModuleType=snAgentConfigModuleMgmtModuleType, snChasFanIndex=snChasFanIndex, snAgBootSeqRowStatus=snAgBootSeqRowStatus, snAgentTaskBufferTable=snAgentTaskBufferTable, snAgReload=snAgReload, snAgGblEnableLinkUpTrap=snAgGblEnableLinkUpTrap, snAgentTempSensorDescr=snAgentTempSensorDescr, snAgentUserAccntName=snAgentUserAccntName, snAgentTaskMQPriority=snAgentTaskMQPriority, snAgentCpu=snAgentCpu, snAgStaticSysLogBufferCriticalLevel=snAgStaticSysLogBufferCriticalLevel, snAgentBrdMainBrdDescription=snAgentBrdMainBrdDescription, snChasGen=snChasGen, snAgentSysParaConfigTable=snAgentSysParaConfigTable, snChasEnableFanTrap=snChasEnableFanTrap, snChasMainBrdDescription=snChasMainBrdDescription, snCAMIpStatIfIndex=snCAMIpStatIfIndex, snAgentTemp2Table=snAgentTemp2Table, snChasFan2Index=snChasFan2Index, snChasPwrSupply2OperStatus=snChasPwrSupply2OperStatus, snAgentConfigModule2Slot=snAgentConfigModule2Slot, snAgentBoot=snAgentBoot, snAgBootSeqIndex=snAgBootSeqIndex, snAgentTaskMQMaxDepth=snAgentTaskMQMaxDepth, snAgentTaskBufferTaskID=snAgentTaskBufferTaskID, snAgSysLogGblDroppedCount=snAgSysLogGblDroppedCount, snAgentConfigModule2Entry=snAgentConfigModule2Entry, snAgFlashImgVer=snAgFlashImgVer, snAgSysLogBufferTimeStamp=snAgSysLogBufferTimeStamp, snChasWarningTemperature=snChasWarningTemperature, snAgentBrdSpeedLeds=snAgentBrdSpeedLeds, snAgentTempThresholdTable=snAgentTempThresholdTable, snCamStatFreePool0Entries=snCamStatFreePool0Entries, snAgentBrdSpeedLedString=snAgentBrdSpeedLedString, snChasPwrSupplyStatus=snChasPwrSupplyStatus, snAgGblTelnetLoginTimeout=snAgGblTelnetLoginTimeout, snCpuProcess5MinUtil=snCpuProcess5MinUtil, snAgCfgEosTable=snAgCfgEosTable, snCamStatAddIPRouteCount=snCamStatAddIPRouteCount, snAgentUserAccntEntry=snAgentUserAccntEntry, snChasUnit=snChasUnit, snAgGblIfIpMask=snAgGblIfIpMask, MacAddress=MacAddress, snAgentTaskMQTaskID=snAgentTaskMQTaskID, fdryLicensedFeatureInfo=fdryLicensedFeatureInfo, snAgSpBootSeqIndex=snAgSpBootSeqIndex, snAgTftpServerAddr=snAgTftpServerAddr, snChasUnitNumSlots=snChasUnitNumSlots, snAgentTempTable=snAgentTempTable, snAgentConfigModule2MgmtModuleType=snAgentConfigModule2MgmtModuleType, snAgGblTelnetTimeout=snAgGblTelnetTimeout, snAgGblCpuUtil1MinAvg=snAgGblCpuUtil1MinAvg, snAgBuildDate=snAgBuildDate, snAgSystemDebugTotalIn=snAgSystemDebugTotalIn, snStackSecSwitchPortCnts=snStackSecSwitchPortCnts, snAgentRedunBkupBootLoad=snAgentRedunBkupBootLoad, DisplayString=DisplayString, snAgentRedunSyncConfig=snAgentRedunSyncConfig, snAgentRedunSwitchOver=snAgentRedunSwitchOver, brcdSwPackageUpgradeAllImages=brcdSwPackageUpgradeAllImages, snAgentTaskBufferPoolID=snAgentTaskBufferPoolID, snAgentSysParaConfigIndex=snAgentSysParaConfigIndex, snChasPwrSupplyTable=snChasPwrSupplyTable, snAgBootSeqInstruction=snAgBootSeqInstruction, snAgentTempEntry=snAgentTempEntry, snAgentTempSlotNum=snAgentTempSlotNum, snCamStatAddIPSessionCount=snCamStatAddIPSessionCount, snAgSystemDebug=snAgSystemDebug, brcdSwPackageUpgradeResultStatus=brcdSwPackageUpgradeResultStatus, snChasProductType=snChasProductType, snAgent=snAgent, snCAMIpStatTotalEntries=snCAMIpStatTotalEntries, snAgentSysParaConfigDescription=snAgentSysParaConfigDescription, 
snAgStaticSysLogBufferIndex=snAgStaticSysLogBufferIndex, snAgentConfigModuleNumberOfCpus=snAgentConfigModuleNumberOfCpus, snAgGblBufferShortage=snAgGblBufferShortage, snStackSecSwitchMacAddr=snStackSecSwitchMacAddr, snAgentBrdRxTrafficLeds=snAgentBrdRxTrafficLeds, snAgGblCpuUtil5SecAvg=snAgGblCpuUtil5SecAvg, snAgentUserAccntEncryptCode=snAgentUserAccntEncryptCode, snCamStatIPMCastFailCount=snCamStatIPMCastFailCount, snChasSpeedLeds=snChasSpeedLeds, snCAMIpStatEntry=snCAMIpStatEntry, snAgGblBannerExec=snAgGblBannerExec, snChasFan2OperStatus=snChasFan2OperStatus, snAgTrpRcvrIndex=snAgTrpRcvrIndex, fdryLicenseLid=fdryLicenseLid, snAgentTaskMQStickyMaxDepth=snAgentTaskMQStickyMaxDepth, snAgGblDataRetrieveMode=snAgGblDataRetrieveMode, snAgSysLogServerRowStatus=snAgSysLogServerRowStatus, snAgentSysParaConfigEntry=snAgentSysParaConfigEntry, snAgentBrdAlarmLedString=snAgentBrdAlarmLedString, snAgTrpRcvrIpAddr=snAgTrpRcvrIpAddr, snAgStaticSysLogBufferTimeStamp=snAgStaticSysLogBufferTimeStamp, snAgentHwICBMCounterEntry=snAgentHwICBMCounterEntry, brcdSwIntfModAutoUpgradeTftpAddr=brcdSwIntfModAutoUpgradeTftpAddr, snAgSysLogBufferCriticalLevel=snAgSysLogBufferCriticalLevel, snAgentSysParaConfig=snAgentSysParaConfig, snChasExpBrdDescription=snChasExpBrdDescription, snAgGblEnableModuleInsertedTrap=snAgGblEnableModuleInsertedTrap, snAgentBrdEntry=snAgentBrdEntry, snChasMediaLeds=snChasMediaLeds, snChasTrafficLeds=snChasTrafficLeds, snStackMaxSecSwitch=snStackMaxSecSwitch, snCpuProcess15MinUtil=snCpuProcess15MinUtil, snCamStatFreePool2Entries=snCamStatFreePool2Entries, snAgentBrdTable=snAgentBrdTable, snAgentTemp=snAgentTemp, snAgGblPassword=snAgGblPassword, snChasEnableTempWarnTrap=snChasEnableTempWarnTrap, snChasFanEntry=snChasFanEntry, snStackSecSwitchIpAddr=snStackSecSwitchIpAddr, snAgentRedunActiveMgmtMod=snAgentRedunActiveMgmtMod, snAgSpBootSeqFilename=snAgSpBootSeqFilename, snAgentUserAccntPrivilege=snAgentUserAccntPrivilege, snAgentConfigModuleTable=snAgentConfigModuleTable, snAgSysLogGblCriticalLevel=snAgSysLogGblCriticalLevel, snAgSystemDebugBMFreeBuffer=snAgSystemDebugBMFreeBuffer, snAgBuildVer=snAgBuildVer, snAgEraseNVRAM=snAgEraseNVRAM, snAgCfgEosIndex=snAgCfgEosIndex, snAgentBrd2RedundantStatus=snAgentBrd2RedundantStatus, snAgentConfigModuleOperStatus=snAgentConfigModuleOperStatus, snChasFan2Unit=snChasFan2Unit, snChasUnitTable=snChasUnitTable, snStackTotalSecSwitch=snStackTotalSecSwitch, snCamStatLevel1=snCamStatLevel1, snAgentTaskMQEntry=snAgentTaskMQEntry, snAgentConfigModule2NumberOfPorts=snAgentConfigModule2NumberOfPorts, snAgImgLoad=snAgImgLoad, snAgStaticSysLogBufferTable=snAgStaticSysLogBufferTable, snAgSystemDebugBMFreeBufferMgmt=snAgSystemDebugBMFreeBufferMgmt, snAgTftpServerIp=snAgTftpServerIp, snAgentConfigModuleType=snAgentConfigModuleType, snAgSystemDRAMTotal=snAgSystemDRAMTotal, snCamStatIPSessionFailCount=snCamStatIPSessionFailCount, snAgSysLogServerEntry=snAgSysLogServerEntry, brcdSwPackageUpgradeSkipVersionCheck=brcdSwPackageUpgradeSkipVersionCheck, snAgTrpRcvrUDPPort=snAgTrpRcvrUDPPort, snStackSecSwitchIndex=snStackSecSwitchIndex, snAgSpBootSeqSpNumber=snAgSpBootSeqSpNumber, snCpuProcessName=snCpuProcessName, snAgSystemDebugDRAMBuffer=snAgSystemDebugDRAMBuffer, brcdSwPackageGroup=brcdSwPackageGroup, snAgentHwICBMCounterTable=snAgentHwICBMCounterTable, snAgBootSeqIpAddr=snAgBootSeqIpAddr, brcdPortLicenseTable=brcdPortLicenseTable, snAgSystemDRAM=snAgSystemDRAM, snAgentCpuUtil100thPercent=snAgentCpuUtil100thPercent, PYSNMP_MODULE_ID=snAgent, snChasUnitSerNum=snChasUnitSerNum, 
snAgGblCurrentSecurityLevel=snAgGblCurrentSecurityLevel, snChasShutdownTemperature=snChasShutdownTemperature, snCamStatFreePool3Entries=snCamStatFreePool3Entries, snAgSystemDRAMUtil=snAgSystemDRAMUtil, snAgentTaskMQFailedCount=snAgentTaskMQFailedCount, snCAMIpStatLevel=snCAMIpStatLevel, snChasStatusLeds=snChasStatusLeds, snAgentConfigModuleDescription=snAgentConfigModuleDescription, brcdSwIntfModAutoUpgradeSrcPath=brcdSwIntfModAutoUpgradeSrcPath, brcdSwPackageUpgradeResultTable=brcdSwPackageUpgradeResultTable, snAgSoftwareFeature=snAgSoftwareFeature, brcdSwPackageUpgradeResultIndex=brcdSwPackageUpgradeResultIndex, snAgSysLogBufferCalTimeStamp=snAgSysLogBufferCalTimeStamp, snChasActualTemperature=snChasActualTemperature, snAgentBrd2MainPortTotal=snAgentBrd2MainPortTotal, snChasExpPortTotal=snChasExpPortTotal, snChasMainBrdId=snChasMainBrdId, snStackSecSwitchAck=snStackSecSwitchAck, snAgSysLogBufferIndex=snAgSysLogBufferIndex, snAgentPoe=snAgentPoe, snAgentTaskCpuTaskName=snAgentTaskCpuTaskName, snAgentBrdExpBrdId=snAgentBrdExpBrdId, snChasFlashCard=snChasFlashCard, snChasPwrSupply2Unit=snChasPwrSupply2Unit, snStackPriSwitchMode=snStackPriSwitchMode, snAgentBrdTxTrafficLedString=snAgentBrdTxTrafficLedString, snAgentTempValue=snAgentTempValue, snAgSystemDebugBMBuffer=snAgSystemDebugBMBuffer, snChasExpBrdId=snChasExpBrdId, fdryLicensePackageName=fdryLicensePackageName, snAgentSysParaConfigCurrent=snAgentSysParaConfigCurrent, snCamStatAddL2SessionCount=snCamStatAddL2SessionCount, snAgentTaskBufferEntry=snAgentTaskBufferEntry, snAgentHw=snAgentHw, fdryLicenseTrialTimeLeft=fdryLicenseTrialTimeLeft, snAgGblBannerMotd=snAgGblBannerMotd, snAgSysLogGblBufferSize=snAgSysLogGblBufferSize, snCAMStatTable=snCAMStatTable, snCAMIpStatFreeEntries=snCAMIpStatFreeEntries, snCamStatAddMACCount=snCamStatAddMACCount, snChasFan2Table=snChasFan2Table, snAgGblEnableLinkDownTrap=snAgGblEnableLinkDownTrap, snAgentTask=snAgentTask, snAgentTrp=snAgentTrp, snAgentTempThresholdLowValue=snAgentTempThresholdLowValue, snAgCfgEosChkSum=snAgCfgEosChkSum, snAgentTaskCpuTaskActivity=snAgentTaskCpuTaskActivity, snCamStatAddVLANCount=snCamStatAddVLANCount, snAgentBrdModuleStatus=snAgentBrdModuleStatus, snCamStatMacFailCount=snCamStatMacFailCount, snAgentBrd=snAgentBrd, fdryLicenseSerialNumber=fdryLicenseSerialNumber, snAgentUserGbl=snAgentUserGbl, snChasFactorySerialNumber=snChasFactorySerialNumber, snAgentBrdExpBrdDescription=snAgentBrdExpBrdDescription, snAgentBrdRxTrafficLedString=snAgentBrdRxTrafficLedString, snAgSysLogGblFlushedCount=snAgSysLogGblFlushedCount, fdryLicenseTable=fdryLicenseTable, snChasArchitectureType=snChasArchitectureType, snAgSysLogBufferEntry=snAgSysLogBufferEntry, snAgWebMgmtServerTcpPort=snAgWebMgmtServerTcpPort, snCamStatDeleteDMACamCount=snCamStatDeleteDMACamCount, snAgDefGwayIp=snAgDefGwayIp, snChasIdNumber=snChasIdNumber, snAgentConfigModule=snAgentConfigModule, snChasSerNum=snChasSerNum, snAgSystemLog=snAgSystemLog, snAgentBrdRedundantStatus=snAgentBrdRedundantStatus, snAgentBrd2ModuleStatus=snAgentBrd2ModuleStatus, fdryLicenseEntry=fdryLicenseEntry, brcdPortLicenseIndex=brcdPortLicenseIndex, snChasUnitActualTemperature=snChasUnitActualTemperature, snAgentBrdTrafficLedString=snAgentBrdTrafficLedString, snAgentBrdIndex=snAgentBrdIndex, snAgentConfigModule2Unit=snAgentConfigModule2Unit, snStackSecSwitchInfo=snStackSecSwitchInfo, snCamStatAddIPMCastCount=snCamStatAddIPMCastCount, snCamStatIPRouteFailCount=snCamStatIPRouteFailCount, snAgCfgEosEntry=snAgCfgEosEntry, 
snCamStatRouteLookupCount=snCamStatRouteLookupCount, snAgentCpuUtilEntry=snAgentCpuUtilEntry, snChasPwrSupply2Table=snChasPwrSupply2Table, snAgBootSeqTable=snAgBootSeqTable, snAgentTaskMQStickyMaxMsgs=snAgentTaskMQStickyMaxMsgs, snAgentBrd2MainBrdDescription=snAgentBrd2MainBrdDescription, snStackSecSwitchCfgCmd=snStackSecSwitchCfgCmd, snAgCfgEos=snAgCfgEos, snAgentTaskMQTable=snAgentTaskMQTable, brcdPortLicenseStatus=brcdPortLicenseStatus, snAgentSysParaConfigMax=snAgentSysParaConfigMax, snAgentBrdTrafficLeds=snAgentBrdTrafficLeds, snAgImgLoadSPModuleNumber=snAgImgLoadSPModuleNumber, snAgGblIfIpAddr=snAgGblIfIpAddr, snAgentCpuUtilInterval=snAgentCpuUtilInterval, snAgentBrdTxTrafficLeds=snAgentBrdTxTrafficLeds, snAgGblPasswordChangeMode=snAgGblPasswordChangeMode, snChasUnitShutdownTemperature=snChasUnitShutdownTemperature, snAgentTemp2UnitNum=snAgentTemp2UnitNum)
mibBuilder.exportSymbols("FOUNDRY-SN-AGENT-MIB", snAgentConfigModule2SerialNumber=snAgentConfigModule2SerialNumber, snAgentTaskCpuTaskID=snAgentTaskCpuTaskID, snAgentBrd2Slot=snAgentBrd2Slot, snAgGblDeleteFirstBeforeDownload=snAgGblDeleteFirstBeforeDownload, snCpuProcess1MinUtil=snCpuProcess1MinUtil, fdryLicenseSlot=fdryLicenseSlot, snAgentTaskCpuEntry=snAgentTaskCpuEntry, snAgSFlowSourceInterface=snAgSFlowSourceInterface, snChasNumSlots=snChasNumSlots, snAgGblTrapMessage=snAgGblTrapMessage, snAgGblEnableTelnetServer=snAgGblEnableTelnetServer, snAgBootSeqFilename=snAgBootSeqFilename, snChasUnitIndex=snChasUnitIndex, snChasSystemMode=snChasSystemMode, brcdSwPackageUpgradeResultEntry=brcdSwPackageUpgradeResultEntry, snAgentBrdAlarmLeds=snAgentBrdAlarmLeds, snStackSecSwitchSyncCmd=snStackSecSwitchSyncCmd, snChasType=snChasType, snAgentBrdStatusLedString=snAgentBrdStatusLedString, snAgTrpRcvrCommunityOrSecurityName=snAgTrpRcvrCommunityOrSecurityName, snAgTrpRcvrStatus=snAgTrpRcvrStatus, snAgentCpuUtilPercent=snAgentCpuUtilPercent, snAgSystemDRAMForBGP=snAgSystemDRAMForBGP, snAgWriteNVRAM=snAgWriteNVRAM, snStackFmpSetProcess=snStackFmpSetProcess, brcdPortLicenseEntry=brcdPortLicenseEntry, snAgStaticSysLogBufferEntry=snAgStaticSysLogBufferEntry, snAgGblDynMemUtil=snAgGblDynMemUtil, snAgSystemDebugCpuQueueRead=snAgSystemDebugCpuQueueRead, snAgentCpuProcessEnable=snAgentCpuProcessEnable, snAgentHwICBMCounterReadOutput=snAgentHwICBMCounterReadOutput, snAgStaticSysLogBufferMessage=snAgStaticSysLogBufferMessage, snCamStatDMAMasterNumber=snCamStatDMAMasterNumber, snAgTrapHoldTime=snAgTrapHoldTime, snAgImgLoadSPModuleType=snAgImgLoadSPModuleType, snAgSysLogGblServer=snAgSysLogGblServer, snAgentTempSensorId=snAgentTempSensorId, snChasMainPortTotal=snChasMainPortTotal, snAgBuildtime=snAgBuildtime, snAgGblCpuUtilCollect=snAgGblCpuUtilCollect, snChasFanDescription=snChasFanDescription, snCamStatFreePool1Entries=snCamStatFreePool1Entries, snAgSysLogGblPersistenceEnable=snAgSysLogGblPersistenceEnable, brcdSwPackageLoadStatus=brcdSwPackageLoadStatus, snAgGblSecurityLevelSet=snAgGblSecurityLevelSet, snAgentTempThresholdLevel=snAgentTempThresholdLevel, snAgBootSeqEntry=snAgBootSeqEntry, snChasPwrSupplyDescription=snChasPwrSupplyDescription, snAgentRedunBkupCopyBootCode=snAgentRedunBkupCopyBootCode, snCamStatDMAIdNumber=snCamStatDMAIdNumber, snStackSecSwitchTable=snStackSecSwitchTable, snAgSysLogGblLoggedCount=snAgSysLogGblLoggedCount, snAgSysLogBufferTable=snAgSysLogBufferTable, snAgTrpRcvrTable=snAgTrpRcvrTable, snAgentConfigModuleSerialNumber=snAgentConfigModuleSerialNumber, brcdSwPackageLoad=brcdSwPackageLoad, snAgSysLogGblClear=snAgSysLogGblClear, fdryLicenseHash=fdryLicenseHash, snAgentCpuUtilCpuId=snAgentCpuUtilCpuId, snAgentUserAccntPassword=snAgentUserAccntPassword, snChasFlashCardLeds=snChasFlashCardLeds, snAgSystemDebugDRAMToBMCopyFail=snAgSystemDebugDRAMToBMCopyFail, snAgGblEnableModuleRemovedTrap=snAgGblEnableModuleRemovedTrap, snChasEnablePwrSupplyTrap=snChasEnablePwrSupplyTrap, snAgSysLogGbl=snAgSysLogGbl, snStackGen=snStackGen, snAgentBrdMediaLedString=snAgentBrdMediaLedString, snAgentEnableMgmtModRedunStateChangeTrap=snAgentEnableMgmtModRedunStateChangeTrap, snCamStatAddIPXCount=snCamStatAddIPXCount, snAgentConfigModuleRowStatus=snAgentConfigModuleRowStatus, snAgentHwICBMCounterWriteOutput=snAgentHwICBMCounterWriteOutput, snAgentBrd2Table=snAgentBrd2Table, snAgSystemDebugIpcGigLock=snAgSystemDebugIpcGigLock, snAgentConfigModule2Table=snAgentConfigModule2Table, snAgentRedundant=snAgentRedundant, 
snCamStatAddIPHostCount=snCamStatAddIPHostCount, snCpuProcessTable=snCpuProcessTable, snAgentHwICBMCounterWriteInput=snAgentHwICBMCounterWriteInput, snAgentBrdMemoryUtil100thPercent=snAgentBrdMemoryUtil100thPercent, snChasUnitEntry=snChasUnitEntry, snChasFanOperStatus=snChasFanOperStatus, snAgentConfigModule2Description=snAgentConfigModule2Description, snAgGblReadWriteCommunity=snAgGblReadWriteCommunity, snCpuProcessEntry=snCpuProcessEntry, snAgentConfigModuleEntry=snAgentConfigModuleEntry, snAgentConfigModule2RowStatus=snAgentConfigModule2RowStatus, brcdSwIntfModAutoUpgradeAllImages=brcdSwIntfModAutoUpgradeAllImages, snAgConfigFromNVRAM=snAgConfigFromNVRAM, snAgSysLogGblOverrunCount=snAgSysLogGblOverrunCount, snAgentBrdMediaLeds=snAgentBrdMediaLeds, snAgentConfigModule2OperStatus=snAgentConfigModule2OperStatus, snAgGblBannerIncoming=snAgGblBannerIncoming, snChasPwrSupplyIndex=snChasPwrSupplyIndex, snAgSpBootSeqEntry=snAgSpBootSeqEntry, snAgentTaskBufferTaskName=snAgentTaskBufferTaskName, snAgentTaskMQMsgs=snAgentTaskMQMsgs, snAgSystemDRAMForOSPF=snAgSystemDRAMForOSPF, brcdSwIntfModAutoUpgradeTftpAddrType=brcdSwIntfModAutoUpgradeTftpAddrType, snAgentConfigModule2NumberOfCpus=snAgentConfigModule2NumberOfCpus, snAgentBrdPartNumber=snAgentBrdPartNumber, snAgGblDynMemFree=snAgGblDynMemFree, snAgentBrdSerialNumber=snAgentBrdSerialNumber, snAgentHwICBMCounterFreeDepth=snAgentHwICBMCounterFreeDepth, snAgSysLogServerIP=snAgSysLogServerIP, snCamStatLevel2=snCamStatLevel2, snAgImgVer=snAgImgVer, snAgSpBootSeqInstruction=snAgSpBootSeqInstruction, snStackSecSwitchSubnetMask=snStackSecSwitchSubnetMask, snAgentCpuUtilTable=snAgentCpuUtilTable, fdryLicenseTrialTimeElapsed=fdryLicenseTrialTimeElapsed, snStackSecSwitchSlotId=snStackSecSwitchSlotId, snAgSysLogGblFacility=snAgSysLogGblFacility, snAgSystemDRAMFree=snAgSystemDRAMFree, snAgentCpuUtilValue=snAgentCpuUtilValue, snAgentHwICBMCounterReadInput=snAgentHwICBMCounterReadInput, snAgentTaskMQDepth=snAgentTaskMQDepth, snAgTrpRcvrEntry=snAgTrpRcvrEntry, snAgentBrdMainBrdId=snAgentBrdMainBrdId, snAgentBrd2Entry=snAgentBrd2Entry, snAgSysLogBufferMessage=snAgSysLogBufferMessage, snAgentBrd2Unit=snAgentBrd2Unit, snCamStatL2SessionFailCount=snCamStatL2SessionFailCount, snAgStaticSysLogBufferCalTimeStamp=snAgStaticSysLogBufferCalTimeStamp, snAgentCpuUtilSlotNum=snAgentCpuUtilSlotNum, snChasPwrSupply2Description=snChasPwrSupply2Description, snChasPwrSupplyEntry=snChasPwrSupplyEntry, snAgGblSecurityLevelBinding=snAgGblSecurityLevelBinding, snChasUnitWarningTemperature=snChasUnitWarningTemperature, snAgentBrdMainPortTotal=snAgentBrdMainPortTotal, snCamStatLevel3=snCamStatLevel3, snAgentRedunGbl=snAgentRedunGbl, snCamStatFreeL2Entries=snCamStatFreeL2Entries, snAgSysLogServerUDPPort=snAgSysLogServerUDPPort, fdryLicenseTrialState=fdryLicenseTrialState, snAgCfgLoad=snAgCfgLoad, snAgentHwICBMCounterWriteDrop=snAgentHwICBMCounterWriteDrop, snAgentBrdMemoryTotal=snAgentBrdMemoryTotal, snAgGblTelnetPassword=snAgGblTelnetPassword, snAgSpBootSeqIpAddr=snAgSpBootSeqIpAddr, snAgentTemp2Value=snAgentTemp2Value, snAgentBrdExpPortTotal=snAgentBrdExpPortTotal, snAgGblQueueOverflow=snAgGblQueueOverflow, snCpuProcessRuntime=snCpuProcessRuntime, snAgTrpRcvrSecurityLevel=snAgTrpRcvrSecurityLevel, snAgTrpRcvrSecurityModel=snAgTrpRcvrSecurityModel, snCAMStatEntry=snCAMStatEntry, fdryLicensePrecedence=fdryLicensePrecedence, snAgentTaskCpuState=snAgentTaskCpuState, brcdSwPackageUpgradeResultImageType=brcdSwPackageUpgradeResultImageType, snStackSyncAllSecSwitch=snStackSyncAllSecSwitch, 
snAgGblDmaFailure=snAgGblDmaFailure, snAgentBrd2MainBrdId=snAgentBrd2MainBrdId, snAgGblEnableColdStartTrap=snAgGblEnableColdStartTrap, snAgCfgEosPacket=snAgCfgEosPacket, snAgentTemp2SensorId=snAgentTemp2SensorId, snAgentTempThresholdModule=snAgentTempThresholdModule, snAgentTemp2SensorDescr=snAgentTemp2SensorDescr, snAgGblLevelPasswordsMask=snAgGblLevelPasswordsMask, snAgGblEnableWebMgmt=snAgGblEnableWebMgmt, brcdSwPackageUpgradeResultTimeStamp=brcdSwPackageUpgradeResultTimeStamp, snAgTrpRcvrCurEntry=snAgTrpRcvrCurEntry, snAgGblDynMemTotal=snAgGblDynMemTotal, snAgentSysParaConfigDefault=snAgentSysParaConfigDefault, snAgSystemDebugTotalOut=snAgSystemDebugTotalOut, snAgSysLogServerTable=snAgSysLogServerTable, snAgentUserMaxAccnt=snAgentUserMaxAccnt, snChasPwrSupplyOperStatus=snChasPwrSupplyOperStatus, snAgentBrdStatusLeds=snAgentBrdStatusLeds, snCpuProcess5SecUtil=snCpuProcess5SecUtil, snAgentGbl=snAgentGbl, snAgGblCpuUtil1SecAvg=snAgGblCpuUtil1SecAvg, snAgGblEnableSLB=snAgGblEnableSLB, snChasFan=snChasFan, snChasFactoryPartNumber=snChasFactoryPartNumber, snAgentTemp2SlotNum=snAgentTemp2SlotNum, snAgentBrdMemoryAvailable=snAgentBrdMemoryAvailable, snAgentConfigModuleIndex=snAgentConfigModuleIndex, snCamStatFreeL2LowestSection=snCamStatFreeL2LowestSection, snAgSpBootSeqTable=snAgSpBootSeqTable, snAgentTaskCpuTable=snAgentTaskCpuTable, snStackSecSwitchEntry=snStackSecSwitchEntry, brcdSw=brcdSw, snAgGblReadOnlyCommunity=snAgGblReadOnlyCommunity, brcdSwPackageFname=brcdSwPackageFname, snAgentTaskMQMaxMsgs=snAgentTaskMQMaxMsgs, snAgGblResourceLowWarning=snAgGblResourceLowWarning, snAgImgFname=snAgImgFname, snAgentTaskCpuHoldTime=snAgentTaskCpuHoldTime, snAgentUser=snAgentUser, snAgentUserAccntTable=snAgentUserAccntTable, snChasPwrSupply2Entry=snChasPwrSupply2Entry, snChasPwrSupply2Index=snChasPwrSupply2Index, snAgentLog=snAgentLog, brcdSwPackageUpgradeResultDescription=brcdSwPackageUpgradeResultDescription, snAgGblPasswordCheckMode=snAgGblPasswordCheckMode, brcdSwIntfModAutoUpgrade=brcdSwIntfModAutoUpgrade, snCamStatHostLookupCount=snCamStatHostLookupCount, fdryLicenseTrialDays=fdryLicenseTrialDays, snAgentTempThresholdHighValue=snAgentTempThresholdHighValue, fdryLicenseType=fdryLicenseType, snChasUnitPartNum=snChasUnitPartNum, snChasPwr=snChasPwr, snAgentTaskMQLength=snAgentTaskMQLength, snAgentTemp2Entry=snAgentTemp2Entry, fdryLicenseCapacity=fdryLicenseCapacity, snAgentTaskBufferCount=snAgentTaskBufferCount, snStackSmSlotIndex=snStackSmSlotIndex, snAgentUserAccntRowStatus=snAgentUserAccntRowStatus, snChasFan2Description=snChasFan2Description, snAgCfgFname=snAgCfgFname, snAgentHwICBMCounterDMA=snAgentHwICBMCounterDMA, snChasFanTable=snChasFanTable, snAgentBrdUpTime=snAgentBrdUpTime, snAgGblExcessiveErrorWarning=snAgGblExcessiveErrorWarning, snCAMIpStatTable=snCAMIpStatTable, snChasFan2Entry=snChasFan2Entry, snAgentHwICBMCounterSlot=snAgentHwICBMCounterSlot, snAgentTaskMQStickyFailedCount=snAgentTaskMQStickyFailedCount, BrcdImageType=BrcdImageType, snAgSystemDebugDRAMGetError=snAgSystemDebugDRAMGetError, snAgentTempThresholdEntry=snAgentTempThresholdEntry, snAgentTaskMQTaskName=snAgentTaskMQTaskName, snAgentLicense=snAgentLicense, snAgSpBootSeqRowStatus=snAgSpBootSeqRowStatus, snAgentSysParaConfigMin=snAgentSysParaConfigMin, fdryLicenseMode=fdryLicenseMode, brcdSwPackageUpgrade=brcdSwPackageUpgrade, brcdSwIntfModAutoUpgradeMode=brcdSwIntfModAutoUpgradeMode, snAgentTaskCpuWaitTime=snAgentTaskCpuWaitTime, snAgGblCpuUtilData=snAgGblCpuUtilData, fdryLicenseVendorInfo=fdryLicenseVendorInfo, 
snAgSysLogGblEnable=snAgSysLogGblEnable)
|
Python
|
CL
|
cb8ed3cbd1219bf9dbba6786f3c8900047ecb26a500d8e683cd3a297472fc5c8
|
from pyrocko import io, io_common
from pyrocko import model
from pyrocko.snuffling import Snuffling, Choice, Switch, Param
class ExportWaveforms(Snuffling):
'''
<html>
<head>
<style type="text/css">
body { margin-left:10px };
</style>
</head>
<h1 align="center">Export selected or visible traces</h1>
<body>
<p>
Choose the desired format from the <b>Format</b> menu and press
<b>Run</b>.
If no traces have been selected using extended markers, all traces visible
in the viewer will be exported.
</p>
<p>
Note that exporting to miniseed requires the network, station, location and
channel codes to be of length 2, 5, 2 and 3, respectively. Codes exceeding
these lengths will be silently truncated.<br />
In order to have more control on code replacements it is recommended to use
the command line tool <b>jackseis</b> which is shipped with pyrocko.<br />
When exporting to miniseed it is possible to combine all traces into
one file by giving a filename without template placeholders.
</p>
</body>
</html>
'''
def setup(self):
self.set_name('Export Waveforms')
self.add_parameter(
Choice(
'Format', 'format', 'mseed', ['mseed', 'text', 'sac', 'yaff']))
self.add_parameter(
Param(
'Time length limit for output files', 'tinc', None,
0.1, 86400., low_is_none=True))
self.add_parameter(Switch('Save Station Meta', 'save_stations', False))
self.set_live_update(False)
def call(self):
self.cleanup()
if self.tinc is not None:
template = \
'trace_%n.%s.%l.%c_%(tmin_us)s'
else:
template = 'trace_%n.%s.%l.%c'
if self.format == 'text':
default_output_filename = template + '.dat'
else:
default_output_filename = template + '.' + self.format
out_filename = self.output_filename('Template for output files',
default_output_filename)
for trs in self.chopper_selected_traces(fallback=True, tinc=self.tinc):
trs2save = []
for tr in trs:
if self.format == 'mseed':
if len(tr.network) > 2:
tr.set_network(tr.network[:2])
if len(tr.station) > 5:
tr.set_station(tr.station[:5])
if len(tr.location) > 2:
tr.set_location(tr.location[:2])
if len(tr.channel) > 3:
tr.set_channel(tr.channel[:3])
trs2save.append(tr)
try:
io.save(
trs2save, out_filename,
format=self.format,
overwrite=True)
except io_common.FileSaveError as e:
self.fail(str(e))
if self.save_stations:
stations = self.get_viewer().stations.values()
fn = self.output_filename('Save Stations', 'stations.pf')
model.dump_stations(stations, fn)
def __snufflings__():
return [ExportWaveforms()]
|
Python
|
CL
|
610b1a4feb77e2901dc0c77c1972b8692e01c6a28f07d856773b83e6ca1c445c
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
from typing import * # NoQA
import functools
import hashlib
import io
import os
import pathlib
import tempfile
import edgedb
from edgedb.protocol import dstructs
from edb.common import binwrapper
from . import consts
class DumpImpl:
conn: edgedb.BlockingIOConnection
# Mapping of `schema_object_id` to a list of data block sizes/checksums.
blocks_datainfo: Dict[str, List[Tuple[int, bytes]]]
def __init__(self, conn: edgedb.BlockingIOConnection) -> None:
self.conn = conn
self.blocks_datainfo = {}
def _data_callback(
self,
tmpdir: pathlib.Path,
data: dstructs.DumpDataBlock,
) -> None:
fn = tmpdir / data.schema_object_id.hex
with open(fn, 'ba+') as f:
f.write(data.data)
self.blocks_datainfo.setdefault(data.schema_object_id, []).append(
(
len(data.data),
hashlib.sha1(data.data).digest()
)
)
def _serialize_header(self, desc: dstructs.DumpDesc) -> bytes:
buf = io.BytesIO()
binbuf = binwrapper.BinWrapper(buf)
binbuf.write_ui64(desc.server_ts)
binbuf.write_len32_prefixed_bytes(desc.server_version)
binbuf.write_len32_prefixed_bytes(desc.schema)
binbuf.write_ui64(len(desc.blocks))
for block in desc.blocks:
block_di = self.blocks_datainfo[block.schema_object_id]
if len(block_di) != block.data_blocks_count:
raise RuntimeError(
'server reported data blocks count does not match '
'actual received')
binbuf.write_bytes(block.schema_object_id.bytes)
binbuf.write_ui32(len(block.schema_deps))
for dep in block.schema_deps:
binbuf.write_bytes(dep.bytes)
binbuf.write_len32_prefixed_bytes(block.type_desc)
binbuf.write_ui64(block.data_size)
binbuf.write_ui64(block.data_blocks_count)
total_size = 0
for data_size, data_hash in block_di:
binbuf.write_ui64(data_size)
binbuf.write_bytes(data_hash)
total_size += data_size
if total_size != block.data_size:
raise RuntimeError(
'server reported data block size does not match '
'actual received')
return buf.getvalue()
def dump(self, outfn: os.PathLike) -> None:
with tempfile.TemporaryDirectory() as tmp:
tmpdir = pathlib.Path(tmp)
desc = self.conn._dump(
on_data=functools.partial(self._data_callback, tmpdir))
header = self._serialize_header(desc)
with open(outfn, 'wb+') as outf:
buf = binwrapper.BinWrapper(outf)
buf.write_bytes(consts.HEADER_TITLE)
buf.write_ui64(consts.DUMP_PROTO_VER)
buf.write_bytes(hashlib.sha1(header).digest())
buf.write_ui64(len(header))
buf.write_bytes(header)
for block in desc.blocks:
datafn = tmpdir / block.schema_object_id.hex
with open(datafn, 'br') as dataf:
while True:
data = dataf.read(consts.COPY_BUFFER_SIZE)
if not data:
break
buf.write_bytes(data)
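# A minimal sketch (not part of the original module) of how the preamble written by
# DumpImpl.dump() could be read back and verified. It assumes BinWrapper exposes
# read_bytes/read_ui64 counterparts to the writers used above, and that a sha1
# digest is 20 bytes.
def verify_dump_preamble(fn: os.PathLike) -> bytes:
    with open(fn, 'rb') as f:
        buf = binwrapper.BinWrapper(f)
        if buf.read_bytes(len(consts.HEADER_TITLE)) != consts.HEADER_TITLE:
            raise RuntimeError('not an EdgeDB dump file')
        proto = buf.read_ui64()
        if proto != consts.DUMP_PROTO_VER:
            raise RuntimeError(f'unsupported dump protocol version: {proto}')
        header_hash = buf.read_bytes(20)  # sha1 digest of the serialized header
        header = buf.read_bytes(buf.read_ui64())
        if hashlib.sha1(header).digest() != header_hash:
            raise RuntimeError('dump header checksum mismatch')
        return header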
|
Python
|
CL
|
35a9b2c2cce05fb73774535eace8e8264152ccbf7d1722aaa806a5346c1812a4
|
#!/usr/bin/env python
# coding: utf-8
# # Welcome to the final project!
# During this course we covered linear regression and SVM for data analysis. In this notebook we will be working with data about life expectancy across different countries. We use descriptive features based on statistical data to predict life expectancy as a continuous value, and later classify countries as having either a "short" or "long" life expectancy. You will be asked to implement your own Linear Regression model based on the materials given in the lectures. For classification, you will use an already implemented SVM classifier from the `sklearn` library.
# In[1]:
#some necessary imports we'll use later
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import sys
sys.path.append("..")
import grading
grader = grading.Grader(assignment_key="-8r0HMXGTwqC5yKZHJrcbw",
all_parts=["HPkMz", "xOP97", "cMnZI"])
# In[113]:
# token expires every 30 min
COURSERA_TOKEN = 'NicXjm0C50Adz07w' # YOUR COURSERA TOKEN HERE (can be found in Programming section)
COURSERA_EMAIL = 'Ahmetova.RN@phystech.edu' # YOUR COURSERA EMAIL HERE
# ## Looking at the data
# First, we need to read the data from a `csv` file. This portion has been done for you using `pandas` library. For more information check out the [documentation](https://pandas.pydata.org/pandas-docs/stable/).
# In[3]:
df = pd.read_csv('dataset.csv')
print("Dataset dimesions: ", df.shape)
# dataset preview
df.head()
# In[4]:
# list all available features
df.columns
# **Feature list:**
# 1. Year
# 2. Status - Developed (1) or Developing (0) status
# 3. Life expectancy - Life Expectancy in age
# 4. Adult Mortality - Adult Mortality Rates of both sexes (probability of dying between 15 and 60 years per 1000 population)
# 5. Infant deaths - Number of Infant Deaths per 1000 population
# 6. Alcohol - Alcohol, recorded per capita (15+) consumption (in litres of pure alcohol)
# 7. Percentage expenditure - Expenditure on health as a percentage of Gross Domestic Product per capita(%)
# 8. Hepatitis B - Hepatitis B (HepB) immunization coverage among 1-year-olds (%)
# 9. Measles - Measles, number of reported cases per 1000 population
# 10. BMI - Average Body Mass Index of entire population
# 11. Under-five deaths - Number of under-five deaths per 1000 population
# 12. Polio - Polio (Pol3) immunization coverage among 1-year-olds (%)
# 13. Total expenditure - General government expenditure on health as a percentage of total government expenditure (%)
# 14. Diphtheria - Diphtheria tetanus toxoid and pertussis (DTP3) immunization coverage among 1-year-olds (%)
# 15. HIV/AIDS - Deaths per 1 000 live births HIV/AIDS (0-4 years)
# 16. GDP - Gross Domestic Product per capita (in USD)
# 17. Population - Population of the country
# 18. Thinness 1-19 years - Prevalence of thinness among children and adolescents for Age 10 to 19 (% )
# 19. Thinness 5-9 years - Prevalence of thinness among children for Age 5 to 9(%)
# 20. Income composition of resources - Human Development Index in terms of income composition of resources (index ranging from 0 to 1)
# 21. Schooling - Number of years of Schooling(years)
#
#
# **Target**: Life expectancy
# In[40]:
target_feature = 'life_expectancy'
# In[6]:
plt.figure(figsize=(15,15))
a, b = 5, 5
for i, col in enumerate(df.columns[df.columns !=target_feature]):
plt.subplot(b, a, i+1)
plt.scatter(df[col], df[target_feature])
plt.title(col)
plt.tight_layout()
# Based on the data plots above, which features do you think will contribute to good results of linear regression the most?
# ## Linear Regression
# ### Split data to train and test
# We want our model not to be biased towards certain data, so we will train the model on one set of data and test on another. This is done in order to evaluate how well the model performs on previously "unseen" values. This data separation has been done for you using the `train_test_split` method. The size of the test dataset is 20% of the total, and we define `random_state` to get the same consistent results when running this code.
# In[115]:
from sklearn.model_selection import train_test_split
df_train, df_test = train_test_split(df, test_size = 0.2, random_state = 42)
# ### Writing linear regression
#
# Linear regression is a linear approximation $$f(x)=w_0 + w_1x_1 + ... +w_nx_n$$
#
# As we recall from the lecture the analytical solution for an inconsistent system of $Xw=y$ is
#
# $$\hat{w} = (X^TX)^{-1}X^Ty$$
#
# In our case $X$ is the matrix where each row is an object (a country record) and each column is a feature. $y$ is the target value: the vector of life expectancy values. $\hat{w}$ is the approximate solution.
#
# In the class below, create a linear regression class. `AlgLinearRegression` must provide methods for training on a dataset and predicting values. Those methods have been outlined for you. Don't forget to add the dummy variable for the scalar intercept ($w_0$)!
#
# **Hint**: You can use `np.hstack` to append the dummy variable (a vector of ones) for the scalar intercept. The vector of ones can be created using the `np.ones` method. Do not forget to do this in both `fit` and `predict` methods.
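# Before writing the class, it can help to see the closed-form solution in isolation.
# The cell below is a small illustrative sketch (not part of the graded assignment);
# the toy arrays X_demo and y_demo are made up for the example. np.linalg.pinv
# computes the pseudo-inverse, which equals $(X^TX)^{-1}X^T$ whenever $X^TX$ is
# invertible, and is numerically safer than inverting $X^TX$ directly.
# In[ ]:
import numpy as np
X_demo = np.array([[1.0], [2.0], [3.0]])                    # three objects, one feature
X_aug = np.hstack((np.ones((X_demo.shape[0], 1)), X_demo))  # prepend the dummy column for w0
y_demo = np.array([2.0, 4.0, 6.0])                          # y = 2x exactly
w_hat = np.linalg.pinv(X_aug).dot(y_demo)                   # analytical solution
print(w_hat)                                                # approximately [0., 2.]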
# In[116]:
class AlgLinearRegression():
def __init__(self, fit_intercept = True):
self.coef_ = []
def fit(self, X, y):
        '''
        This method takes the training data and calculates the approximate solution w (self.coef_).
        It will later be used to predict values for new data.
        self - reference parameter to class instance.
        X - matrix of features.
        y - vector of target variables.
        Returns - self.
        '''
        epochs = 200000
        learning_rate = 0.001
#your code here
n=X.shape[1]
self.coef_=np.zeros(n+1)
m = float(len(y))
x = np.hstack((np.ones((int(m), 1)), X))
for i in range(epochs):
prediction =x.dot(self.coef_)
delta=prediction-y
current_gradient=delta.dot(x)
current_gradient*=1/m
self.coef_=self.coef_-current_gradient*learning_rate
            # if i % 1000 == 0:
            #     print(i)
return self
def predict(self, X):
'''
This method takes new data and applies the self.coef (calculated in fit) to it to get the new target predictions.
self - reference parameter to class instance.
X - matrix of features.
Returns - predicted vector of target values.
'''
#your code here
m=X.shape[0]
x = np.hstack((np.ones((int(m), 1)), X))
y_pred=np.dot(x, self.coef_)
return y_pred
# Train and test your regressor using one feature, `schooling`, first. `X_train` and `y_train` are for fitting the regressor, `X_test` for predicting values, and finally `y_test` is for assessing quality. Use mean squared error as the quality metric (see [here](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html)).
# $$MSE (y\_true, y\_pred) = \frac{1}{n}\sum_{i=1}^n (y\_true_i - y\_pred_i)^2$$
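# As a quick sanity check on the metric itself (an illustrative aside, not the graded
# answer), MSE can also be computed directly with numpy; the toy arrays below are
# made up for the example.
# In[ ]:
import numpy as np
y_true_demo = np.array([70.0, 75.0, 80.0])
y_pred_demo = np.array([71.0, 74.0, 78.0])
print(np.mean((y_true_demo - y_pred_demo) ** 2))  # (1 + 1 + 4) / 3 = 2.0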
# In[117]:
X_train = df_train['schooling'].values.reshape(-1,1)
X_test = df_test['schooling'].values.reshape(-1,1)
y_train = df_train[target_feature].values
y_test = df_test[target_feature].values
# In[118]:
#your code here
reg =AlgLinearRegression()
reg.fit(X_train, y_train)
y_pred=reg.predict(X_test)
# In[119]:
from sklearn.metrics import mean_squared_error
#calculate error (your code here)
ans1 = mean_squared_error(y_test, y_pred)
ans1
# In[120]:
#visualize the constructed line
plt.scatter(X_test, y_test, alpha=0.8)
plt.plot(X_test, y_pred, c='r')
plt.title('One feature AlgLinearRegression')
plt.show()
# Is it a good result? Think of ways we can improve the predictions.
# In[121]:
## GRADED PART, DO NOT CHANGE!
grader.set_answer("HPkMz", ans1)
# In[122]:
# you can make submission with answers so far to check yourself at this stage
grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)
# ### Now let's use other features and see if it helps decrease error
# Use the list of features (+ target value) defined in the `features` variable below to train your regressor.
# In[106]:
#features = ['BMI', 'life_expectancy']
features = ['status', 'BMI', 'total_expenditure',
'HIV/AIDS', ' thinness 5-9 years', 'income_composition_of_resources', 'schooling', 'life_expectancy']
# In[107]:
df_train, df_test = train_test_split(df[features], test_size = 0.2, random_state = 42)
X_train = df_train.drop([target_feature], axis=1).values
X_test = df_test.drop([target_feature], axis=1).values
y_train = df_train[target_feature].values
y_test = df_test[target_feature].values
# In[108]:
# your code here
from sklearn import linear_model
reg = linear_model.LinearRegression()
reg.fit(X_train, y_train)
#y_pred=reg.predict(X_test)
# In[109]:
y_pred=reg.predict(X_test)
# In[110]:
# calculate error (your code here)
ans2 = mean_squared_error(y_test, y_pred)
ans2
# In[111]:
## GRADED PART, DO NOT CHANGE!
grader.set_answer("xOP97", ans2)
# In[114]:
# you can make submission with answers so far to check yourself at this stage
grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)
# Let's plot the difference between our predictions and true values. What does it say about our regression model? How can the plot below suggest ways to improve the predictions?
# In[25]:
#plot difference in predictions on all objects
plot_range = len(y_pred)
plt.scatter(np.arange(0, plot_range), y_pred-y_test, alpha=0.8)
plt.plot(np.arange(0, plot_range), np.zeros((plot_range,)), c='g')
plt.title('Difference between predicted and true values for multi-feature regression')
plt.show()
# In[26]:
# plot the distribution of under- vs. over-predictions
plt.hist(y_pred-y_test, bins=20)
plt.axvline(0, c='y')
plt.show()
# Plotting feature importance
# In[27]:
plt.figure(figsize=(20,10))
names = features[:-1] + ['free var']
plt.bar(np.arange(len(names)), np.append(reg.coef_, reg.intercept_))
plt.xticks(range(len(names)), names, rotation='25')
plt.show()
# ## SVM
# In this section of the notebook you will be asked to use `SVM` classifier to split our data in two classes. In the lectures we covered SVM with linear kernel.
#
# <img src="SVM_illustration.png" style="width:60%">
#
# The idea of SVM is to draw a hyperplane so that the separation between classes (two, in our case) is maximum, and then use this hyperplane to determine the class for new objects.
#
# [Image source and more info](https://scikit-learn.org/stable/modules/svm.html).
# ### Now let's train a classifier to divide our data into two categories as established below:
# Let's say that life expectancy counts as "long" if it is at least 80 years.
# In[9]:
TSLD = 80
# Our new target value is the class: set it to 0 if the life expectancy is short, and to 1 if it is long.
# In[13]:
df_class = df[features].copy()
df_class['long_life'] = np.where(df_class[target_feature] >= TSLD, 1, 0)
short_life = df_class[target_feature][df_class[target_feature] < TSLD]
long_life = df_class[target_feature][df_class[target_feature] >= TSLD]
# drop the old target variable
df_class = df_class.drop([target_feature], axis=1)
print("Short to long life expectancy ratio: ", np.round(short_life.shape[0]/df.shape[0],2),':',
np.round(long_life.shape[0]/df.shape[0],2))
# In[14]:
#new data preview
df_class.head()
# ### Training and evaluating model
# In this part of the notebook we will be using an already implemented SVM model from `sklearn`. Use linear kernel.
# In[15]:
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Same as with linear regression we need to split our data into train and test samples to later evaluate the quality of the classifier. Use [accuracy score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html#sklearn.metrics.accuracy_score) as the quality metric here.
#
# $$accuracy(y\_true, y\_pred) = \frac{1}{n}\sum_{i=1}^n [y\_true=y\_pred],$$ where $[a=b]=1$, if $a=b$, and $0$ otherwise.
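# For intuition (an illustrative aside with made-up labels, not the graded answer),
# accuracy is simply the fraction of matching labels:
# In[ ]:
import numpy as np
y_true_demo = np.array([0, 1, 1, 0])
y_pred_demo = np.array([0, 1, 0, 0])
print(np.mean(y_true_demo == y_pred_demo))  # 3 of 4 labels match -> 0.75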
# In[16]:
df_train, df_test = train_test_split(df_class, test_size = 0.2, random_state = 42)
# In[17]:
#separating target variable from features
X_train = df_train.drop(['long_life'], axis=1)
X_test = df_test.drop(['long_life'], axis=1)
y_train = df_train['long_life']
y_test = df_test['long_life']
# In[27]:
# train classifier (your code here)
clf = SVC(kernel='linear')
clf.fit(X_train, y_train)
# In[28]:
y_pred=clf.predict(X_test)
# In[29]:
# evaluate results (your code here)
ans3 = accuracy_score(y_test, y_pred)
ans3
# In[30]:
## GRADED PART, DO NOT CHANGE!
grader.set_answer("cMnZI", ans3)
# In[31]:
# you can make submission with answers so far to check yourself at this stage
grader.submit(COURSERA_EMAIL, COURSERA_TOKEN)
# For linear kernel we can extract feature importances:
# In[32]:
# plot impact for each feature to the classification
plt.figure(figsize=(20,10))
names = X_train.columns
plt.bar(np.arange(len(names)), clf.coef_[0])
plt.xticks(range(len(names)), names)
plt.title('Feature importances for SVM')
plt.show()
# What can you tell from the plot above? Which features are the most important in predicting the label for an object?
|
Python
|
CL
|
55f0809d3a9eca1b330988f98fd09559c73a6981ca851377c7817fb50517af16
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility script to help building Syzygy-instrumented Chrome binaries."""
import logging
import optparse
import os
import shutil
import subprocess
import sys
# The default instrument executable to use to instrument binaries.
_DEFAULT_INSTRUMENTER = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../../../..',
'third_party/syzygy/binaries/exe/instrument.exe'))
_LOGGER = logging.getLogger()
def _Shell(*cmd, **kw):
"""Shells out to "cmd". Returns a tuple of cmd's stdout, stderr."""
_LOGGER.info('Running command "%s".', cmd)
prog = subprocess.Popen(cmd, **kw)
stdout, stderr = prog.communicate()
if prog.returncode != 0:
raise RuntimeError('Command "%s" returned %d.' % (cmd, prog.returncode))
return stdout, stderr
def _InstrumentBinary(instrument_exe, mode, executable, symbol, dst_dir):
"""Instruments the executable found in input_dir, and writes the resultant
instrumented executable and symbol files to dst_dir.
"""
cmd = [instrument_exe,
'--overwrite',
'--mode=%s' % mode,
'--input-image=%s' % executable,
'--input-pdb=%s' % symbol,
'--output-image=%s' % os.path.abspath(
os.path.join(dst_dir, os.path.basename(executable))),
'--output-pdb=%s' % os.path.abspath(
os.path.join(dst_dir, os.path.basename(symbol))),]
return _Shell(*cmd)
def _CopyAgentDLL(agent_dll, destination_dir):
"""Copy the agent DLL to the destination directory."""
  agent_name = os.path.basename(agent_dll)
  agent_dst_name = os.path.join(destination_dir, agent_name)
  shutil.copyfile(agent_dll, agent_dst_name)
def main(options):
logging.basicConfig(level=logging.INFO)
# Make sure the destination directory exists.
if not os.path.isdir(options.destination_dir):
_LOGGER.info('Creating destination directory "%s".',
options.destination_dir)
os.makedirs(options.destination_dir)
  # Instrument the binaries, writing the output to the destination directory.
_InstrumentBinary(options.instrumenter,
options.mode,
options.input_executable,
options.input_symbol,
options.destination_dir)
# Copy the agent DLL to the destination directory.
  _CopyAgentDLL(options.agent_dll, options.destination_dir)
def _ParseOptions():
option_parser = optparse.OptionParser()
option_parser.add_option('--input_executable',
help='The path to the input executable.')
option_parser.add_option('--input_symbol',
help='The path to the input symbol file.')
option_parser.add_option('--mode',
help='Specifies which instrumentation mode is to be used.')
option_parser.add_option('--agent_dll',
help='The agent DLL used by this instrumentation.')
option_parser.add_option('--instrumenter', default=_DEFAULT_INSTRUMENTER,
help='Instrumenter executable to use, defaults to "%s"'
% _DEFAULT_INSTRUMENTER)
  option_parser.add_option('-d', '--destination_dir',
      help='Destination directory for instrumented files.')
options, args = option_parser.parse_args()
if not options.mode:
option_parser.error('You must provide an instrumentation mode.')
if not options.agent_dll:
option_parser.error('You must provide an agent DLL.')
if not options.input_executable:
option_parser.error('You must provide an input executable.')
if not options.input_symbol:
option_parser.error('You must provide an input symbol file.')
  if not options.destination_dir:
    option_parser.error('You must provide a destination directory.')
return options
if '__main__' == __name__:
sys.exit(main(_ParseOptions()))
|
Python
|
CL
|
1e1374c083499bf3f51c072194adb01594287d37e054bd03c12fccfb8974c918
|
from pathlib import Path
from typing import Dict
from util import _get, _to_absolute_path, _error_if_ukn, _load_mod
from errors import FlootSpecSyntaxError
from spec.spec_item import SpecItem
class FunctionMapper(dict, SpecItem):
""" Used for transformers and comperators
transformers:
source: run/transformers.py
names:
tabletransformer: tabletransformer"""
ITEMS = ['source', 'names']
@classmethod
def parse(cls, spec_path: Path, content: Dict, path: str) -> 'FunctionMapper':
_error_if_ukn(content, path, FunctionMapper.ITEMS)
source_path = _to_absolute_path(spec_path, Path(_get(content, f'{path}.source', T=str)))
if not source_path.is_file():
            raise FlootSpecSyntaxError(f'expected {path}.source to contain an existing file path. Found {source_path}')
self = FunctionMapper()
mod = _load_mod(source_path)
for name, method_name in _get(content, f'{path}.names', T=dict).items():
if not hasattr(mod, method_name):
                raise FlootSpecSyntaxError(f'The method {method_name} specified at {path}.names.{name} does not exist in the source module of {path}')
self[name] = getattr(mod, method_name)
return self
|
Python
|
CL
|
c892ce4f084a379bef57ad7d60398b8400d5a5f129efd9b1d31dfa06765f7056
|
# vim: set ts=4 sw=4 et:
#
# Copyright (C) 2008 Novell, Inc.
#
# Authors: Vincent Untz <vuntz@gnome.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
import optparse
import sys
import gmenu
def print_entry(entry, path):
if entry.get_is_excluded():
excluded = ' <excluded>'
else:
excluded = ''
print '%s\t%s\t%s%s' % (path, entry.get_desktop_file_id(), entry.get_desktop_file_path(), excluded)
def print_directory(dir, parent_path = None):
if not parent_path:
path = '/'
else:
path = '%s%s/' % (parent_path, dir.get_name())
for item in dir.get_contents():
type = item.get_type()
if type == gmenu.TYPE_ENTRY:
print_entry(item, path)
elif type == gmenu.TYPE_DIRECTORY:
print_directory(item, path)
elif type == gmenu.TYPE_ALIAS:
aliased = item.get_item()
if aliased.get_type() == gmenu.TYPE_ENTRY:
print_entry(aliased, path)
elif type in [ gmenu.TYPE_HEADER, gmenu.TYPE_SEPARATOR ]:
pass
else:
print >> sys.stderr, 'Unsupported item type: %s' % type
def main(args):
parser = optparse.OptionParser()
parser.add_option('-f', '--file', dest='file',
help='Menu file')
parser.add_option('-i', '--include-excluded', dest='exclude',
action='store_true', default=False,
help='Include <Exclude>d entries')
parser.add_option('-n', '--include-nodisplay', dest='nodisplay',
action='store_true', default=False,
help='Include NoDisplay=true entries')
(options, args) = parser.parse_args()
if options.file:
menu_file = options.file
else:
menu_file = 'applications.menu'
flags = gmenu.FLAGS_NONE
if options.exclude:
flags |= gmenu.FLAGS_INCLUDE_EXCLUDED
if options.nodisplay:
flags |= gmenu.FLAGS_INCLUDE_NODISPLAY
tree = gmenu.lookup_tree(menu_file, flags)
root = tree.get_root_directory()
if not root:
print 'Menu tree is empty'
else:
print_directory(root)
if __name__ == '__main__':
try:
main(sys.argv)
except KeyboardInterrupt:
pass
|
Python
|
CL
|
372f805f3fadcf684f881da2318b6e7959ff0a83feac22490b8de7cf15c7421f
|
from mopy.factories.basicroom import BasicRoom
from mopy.runners import Scheduler
from mopy.entity import Entity, Text, TextAlignment, Sprite, TextView
from mopy.camera import OrthoCamera
from mopy.components import HotSpot, HotSpotManager, Gfx, Cursor
from mopy.shapes import Rect
import mopy.monkey
import mopy.engine
from mopy.script import Script
import example
from mopy.actions import Walk
import mopy.scumm as scumm
from mopy.factories.interface import *
from mopy.factories.scumm_item import create_dynamic
import math
def toggle_cursor(x, y):
gl = mopy.monkey.engine.data.globals
gl.current_action += 1
if gl.current_action >= len(gl.actions):
gl.current_action = 0
if gl.actions[gl.current_action] == -1:
gl.current_action = 0
example.get('cursor').setAnim(gl.actions[gl.current_action])
class ScummRoom(BasicRoom):
def __init__(self, desc):
super().__init__(desc)
self.add_runner(Scheduler())
self.add_runner({
'type': 'runner.collisionengine',
'size': [80, 80],
'response': [
{'tag1': 1, 'tag2': 2, 'on_enter': on_enter_collision_area, 'on_leave': on_leave_collision_area}]
})
    def load_dynamic_items(self):
        for r in mopy.monkey.engine.data.r2i.get(self.id, []):
            entity = create_dynamic(r)
            if entity:
                item = mopy.monkey.engine.data.items.get(r)
                self.add(entity, item.get('parent', self.default_item))
def dialogue_room(desc: dict):
gl = mopy.monkey.engine.data.globals
room = ScummRoom(desc)
room.add_runner(Scheduler())
room.init.append([refresh_inventory])
# read world size
width = desc['width']
height = desc['height']
device_size = mopy.monkey.engine.device_size
cam_width = device_size[0]
cam_height = device_size[1] - gl.ui_height
# add the main node
room.default_item = 'main'
main = Entity(tag='main')
main.camera = OrthoCamera(width, height, cam_width, cam_height, [0, gl.ui_height, cam_width, cam_height], tag='maincam')
room.add(main)
# add the ui node
ui = Entity(tag='ui')
ui.camera = OrthoCamera(cam_width, gl.ui_height, cam_width, gl.ui_height, [0, 0, cam_width, gl.ui_height], tag='uicam')
room.add(ui)
# dialogue node
dialogue_node = TextView(factory=make_dialogue_button, pos=(0, 0), size=(320, 56),
font_size=8, lines=7, delta_x=26, tag='dialogue')
dialogue_node.add_component(HotSpotManager())
room.add(dialogue_node)
inventory_node = Entity(tag='inventory')
ui.add(inventory_node)
# add static items
room.add_items(desc)
room.load_dynamic_items()
return room
def sierra_room(desc: dict):
gl = mopy.monkey.engine.data.globals
room = ScummRoom(desc)
# read world size
width = desc['width']
height = desc['height']
device_size = mopy.monkey.engine.device_size
cam_width = device_size[0]
cam_height = device_size[1]
# add the main node
room.default_item = 'main'
main = Entity(tag='main')
main.camera = OrthoCamera(width, height, gl.sci_viewport[2], gl.sci_viewport[3], gl.sci_viewport, tag='maincam')
main.add_component(HotSpotManager(lmbclick=sierra_walk_to, rmbclick=toggle_cursor))
room.add(main)
# add the ui node
ui = Entity(tag='ui')
ui.camera = OrthoCamera(cam_width, cam_height, cam_width, cam_height, [0, 0, cam_width, cam_height])
ui.add(Text(text=mopy.monkey.engine.title, color=gl.ui_txt_color, pos=(0, cam_height), align=TextAlignment.top_left, font=gl.msg_font, size=8))
ui.add(Text(text=str(gl.score) + ' of ' + str(gl.max_score), color=gl.ui_txt_color, pos=(cam_width, cam_height), align=TextAlignment.top_right, font=gl.msg_font, size=8))
room.add(ui)
a = Sprite(model='01.cursor', tag='cursor')
a.add_component(Cursor())
a.pos = (0, 0, 5)
main.add(a)
# add static items
room.add_items(desc)
# add dynamic items
room.load_dynamic_items()
return room
def map_room(desc: dict):
gl = mopy.monkey.engine.data.globals
room = ScummRoom(desc)
room.add_runner(Scheduler())
width = desc['width']
height = desc['height']
device_size = mopy.monkey.engine.device_size
cam_width = device_size[0]
cam_height = device_size[1]
# add the main node
room.default_item = 'main'
main = Entity(tag='main')
main.camera = OrthoCamera(width, height, cam_width, cam_height, [0, 0, cam_width, cam_height], tag='maincam')
main.add_component(HotSpotManager(lmbclick=walk_to))
room.add(main)
cursor = Text(font=gl.default_font, size=8, text='#', color=(255, 255, 255, 255), tag='_cursor')
cursor.add_component(Cursor())
main.add(cursor)
# add static items
room.add_items(desc)
# add dynamic items
room.load_dynamic_items()
return room
def default_room(desc: dict):
gl = mopy.monkey.engine.data.globals
room = ScummRoom(desc)
room.add_runner(Scheduler())
room.init.append([refresh_inventory])
# read world size
width = desc['width']
height = desc['height']
device_size = mopy.monkey.engine.device_size
cam_width = device_size[0]
cam_height = device_size[1] - gl.ui_height
# add the main node
room.default_item = 'main'
main = Entity(tag='main')
main.camera = OrthoCamera(width, height, cam_width, cam_height, [0, gl.ui_height, cam_width, cam_height], tag='maincam')
main.add_component(HotSpotManager(lmbclick=walk_to))
room.add(main)
# get the verb set from the description. If not specified, verb set 0 will be used
verb_set = desc.get('verb_set', 0)
vset = gl.verb_sets[verb_set]
dv = gl.verbs[vset['default_verb']]
gl.current_verb = vset['default_verb']
gl.current_item_1 = ''
gl.current_item_2 = ''
# add the ui node
ui = Entity(tag='ui')
ui.camera = OrthoCamera(cam_width, gl.ui_height, cam_width, gl.ui_height, [0, 0, cam_width, gl.ui_height], tag='uicam')
ui.add(Text(font='fonts.ui', size=gl.font_size, text=mopy.monkey.engine.read(dv['text']), color=gl.Colors.current_action,
align=TextAlignment.bottom, tag='current_verb', pos=(cam_width / 2, 48, 0)))
ui.add_component(HotSpotManager())
cy = gl.ui_height - 2 * gl.font_size
count = 0
shift = 0
shift_applied = 46
for i in vset['verbs']:
cx = (count // 4) * shift_applied
cy = gl.ui_height - (2 + count % 4) * gl.font_size
e = make_verb_button(i, (cx, cy, 0))
shift = max(shift, 1 + len(mopy.monkey.engine.read(gl.verbs[i]['text'])))
ui.add(e)
count += 1
room.add(ui)
# inventory node
inventory_node = TextView(factory=make_inventory_button, pos=(160, 0), size=(160, 48),
font_size=8, lines=6, delta_x=26, tag='inventory')
inventory_node.add_component(HotSpotManager())
ui.add(inventory_node)
# dialogue node
dialogue_node = TextView(factory=make_dialogue_button, pos=(0, 0), size=(320, 56),
font_size=8, lines=7, delta_x=26, tag='dialogue')
dialogue_node.add_component(HotSpotManager())
room.add(dialogue_node)
# add static items
room.add_items(desc)
# add dynamic items
room.load_dynamic_items()
# print (' ### looking up for dynamic items in room ' + desc['id'])
# for r in mopy.monkey.engine.data.r2i.get(desc['id'], []):
# print('QUI')
# entity = create_dynamic(r)
# item = mopy.monkey.engine.data.items.get(r)
# print(item.get('parent', room.default_item) +' facomi')
# room.add(entity, item.get('parent', room.default_item))
return room
def walkarea(data):
e = Entity()
e.tag = 'walkarea_' + str(data.get('id', 0))
walkarea = {
'type': 'components.walkarea',
'depth': data.get('depth', None),
'scale': data.get('scale', None),
'walls': data.get('walls', None)
}
if 'poly' in data:
walkarea['shape'] = {
'type': 'shape.polygon',
'outline': data['poly'],
'holes': data.get('holes')
}
else:
walkarea['shape'] = {
'type': 'shape.polyline',
'nodes': data['nodes'],
'edges': data['edges']
}
e.add_component(walkarea)
return e
def bg(data):
e = Entity()
if 'image' in data:
e.model = {
'type': 'model.rect',
'tex': data['image'],
'scale': data.get('scale', (1, 1)),
'repeat': data.get('repeat', (1, 1)),
}
else:
e.model = data['model']
e.layer = data.get('layer', 0)
return e
def bg_ps3D(data):
e = bg(data)
# check if epos is set
# if 'epos' in data:
# ep = data['epos']
# e.pos = [ep[0], 0, -math.sqrt(2.0) * ep[1]]
# else:
# e.pos = data['pos']
#
# e.pos[1] += e.pos[2] / math.sqrt(2)
sq = math.sqrt(2) * 0.5
if 'parallax' in data:
p0 = data['parallax']['initial_position']
#p0[1] += sq * e.pos[2]
device = mopy.monkey.engine.device_size
e.add_component({
'type': 'components.parallax',
'cam': 'maincam',
'factor': data['parallax']['factor'],
'cam0': [device[0] * 0.5, device[1] * 0.5],
'pos0': p0
})
return e
# auto size = t.get<glm::vec2>("size");
# auto offset = t.get<glm::vec2>("offset", glm::vec2(0.0f));
# float width = size[0];
# float height = size[1];
# auto tex = t.get<std::string>("tex", "");
# auto render = t.get<std::string>("render", "fill");
# auto color = t.get<glm::vec4> ("color", glm::vec4(1.0f));
# auto rtype = (render == "fill" ? RenderType::FILL : RenderType::WIREFRAME);
# auto repeat = t.get<glm::vec2>("repeat", glm::vec2(1.0f, 1.0f));
# return rect(width, height, offset, rtype, color, tex, repeat);
# e.add_component(Gfx(image=data['image']))
return e
|
Python
|
CL
|
bc07bc8026abc9cfe22f963417c8a6a42fa8fe9a1f203db463ca64307419bd02
|
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import time
from webdriver_manager.chrome import ChromeDriverManager
test_url = "https://datastudio.google.com/u/0/reporting/fc733b10-9744-4a72-a502-92290f608571/page/70YCB" #This is the Data-studio link we will be testing
test_lines = [] # Contains all of the test results gathered from selenium
correct_lines = [] # Contains all of the desired (correct) results
correct_lines.append('This dashboard shows the results of Google Cloud Inter-Region latency and throughput benchmarks. ') #The first test will involve checking whether the title of the report appears
for x in range(8):
correct_lines.append('sending_region') #The rest of the tests will check whether the sending_region filter has been loaded on all 8 tables
options = webdriver.ChromeOptions()
options.add_argument("--log-level=3") #hides uneccesary messages
options.add_argument("headless") #prevents a giant pop-up browser from appearing on screen
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
def execute_test():
global test_lines #Declaring Test Results as Global Variables
global temp
try:
driver.get(test_url) #Running URL on Chrome Driver
time.sleep(3)
#Execute First Test
temp = driver.find_element_by_css_selector('#body > div > div > div.lego-reporting-view.activity-view.no-licensed > div.page > div > div.mainBlock.ng-scope > div > div.scaleSizeHolder > div > lego-report > lego-canvas-container > div > file-drop-zone > span > content-section > div:nth-child(20) > canvas-component > div > div > div > div > textbox > div > div:nth-child(1) > div:nth-child(1) > font > font').text
except NoSuchElementException:
print("Loading of Report Failed")
return False
test_lines.append(temp)
driver.get(test_url) #Reload Webpage
time.sleep(5)
#Execute Second Test
try:
temp = driver.find_element_by_css_selector('#body > div > div > div.lego-reporting-view.activity-view.no-licensed > div.page > div > div.mainBlock.ng-scope > div > div.scaleSizeHolder > div > lego-report > lego-canvas-container > div > file-drop-zone > span > content-section > div:nth-child(10) > canvas-component > div > div > div > div > dimension-filter-control > simple-dimension-filter > control-layout-wrapper > button > div > span.lego-control-section.label > span.main-text > main-section').text
except NoSuchElementException:
print("Test 2 Failed to Execute (Table 1, Page 1)")
return False
test_lines.append(temp)
#Execute Third Test
try:
temp = driver.find_element_by_css_selector('#body > div > div > div.lego-reporting-view.activity-view.no-licensed > div.page > div > div.mainBlock.ng-scope > div > div.scaleSizeHolder > div > lego-report > lego-canvas-container > div > file-drop-zone > span > content-section > div:nth-child(17) > canvas-component > div > div > div > div > dimension-filter-control > simple-dimension-filter > control-layout-wrapper > button > div > span.lego-control-section.label > span.main-text > main-section').text
except NoSuchElementException:
print("Test 3 Failed to Execute (Table 2, Page 1)")
return False
test_lines.append(temp)
#Enter Next Page
driver.find_element_by_css_selector('#reporting-app-header > md-toolbar > div > div.ng-scope.flex > page-navigation > div > div > div.ng-scope > div > span.navBtn.nextBtn').click()
time.sleep(5)
#Execute Fourth Test
try:
temp = driver.find_element_by_css_selector('#body > div > div > div.lego-reporting-view.activity-view.no-licensed > div.page > div > div.mainBlock.ng-scope > div > div.scaleSizeHolder > div > lego-report > lego-canvas-container > div > file-drop-zone > span > content-section > div:nth-child(11) > canvas-component > div > div > div > div > dimension-filter-control > simple-dimension-filter > control-layout-wrapper > button > div > span.lego-control-section.label > span.main-text > main-section').text
except NoSuchElementException:
print("Test 4 Failed to Execute (Table 1, Page 2)")
return False
test_lines.append(temp)
#Execute Fifth Test
try:
temp = driver.find_element_by_css_selector('#body > div > div > div.lego-reporting-view.activity-view.no-licensed > div.page > div > div.mainBlock.ng-scope > div > div.scaleSizeHolder > div > lego-report > lego-canvas-container > div > file-drop-zone > span > content-section > div:nth-child(21) > canvas-component > div > div > div > div > dimension-filter-control > simple-dimension-filter > control-layout-wrapper > button > div > span.lego-control-section.label > span.main-text > main-section').text
except NoSuchElementException:
print("Test 5 Failed to Execute (Table 2, Page 2)")
return False
test_lines.append(temp)
#Execute Sixth Test
try:
temp = driver.find_element_by_css_selector('#body > div > div > div.lego-reporting-view.activity-view.no-licensed > div.page > div > div.mainBlock.ng-scope > div > div.scaleSizeHolder > div > lego-report > lego-canvas-container > div > file-drop-zone > span > content-section > div:nth-child(28) > canvas-component > div > div > div > div > dimension-filter-control > simple-dimension-filter > control-layout-wrapper > button > div > span.lego-control-section.label > span.main-text > main-section').text
except NoSuchElementException:
print("Test 6 Failed to Execute (Table 3, Page 2)")
return False
test_lines.append(temp)
#Enter Next Page
driver.find_element_by_css_selector('#reporting-app-header > md-toolbar > div > div.ng-scope.flex > page-navigation > div > div > div.ng-scope > div > span.navBtn.nextBtn').click()
time.sleep(5)
#Execute Seventh Test
try:
temp = driver.find_element_by_css_selector('#body > div > div > div.lego-reporting-view.activity-view.no-licensed > div.page > div > div.mainBlock.ng-scope > div > div.scaleSizeHolder > div > lego-report > lego-canvas-container > div > file-drop-zone > span > content-section > div:nth-child(11) > canvas-component > div > div > div > div > dimension-filter-control > simple-dimension-filter > control-layout-wrapper > button > div > span.lego-control-section.label > span.main-text > main-section').text
except NoSuchElementException:
print("Test 7 Failed to Execute (Table 1, Page 3)")
return False
test_lines.append(temp)
#Execute Eighth Test
try:
temp = driver.find_element_by_css_selector('#body > div > div > div.lego-reporting-view.activity-view.no-licensed > div.page > div > div.mainBlock.ng-scope > div > div.scaleSizeHolder > div > lego-report > lego-canvas-container > div > file-drop-zone > span > content-section > div:nth-child(22) > canvas-component > div > div > div > div > dimension-filter-control > simple-dimension-filter > control-layout-wrapper > button > div > span.lego-control-section.label > span.main-text > main-section').text
except NoSuchElementException:
print("Test 8 Failed to Execute (Table 2, Page 3)")
return False
test_lines.append(temp)
#Execute Ninth Test
try:
temp = driver.find_element_by_css_selector('#body > div > div > div.lego-reporting-view.activity-view.no-licensed > div.page > div > div.mainBlock.ng-scope > div > div.scaleSizeHolder > div > lego-report > lego-canvas-container > div > file-drop-zone > span > content-section > div:nth-child(30) > canvas-component > div > div > div > div > dimension-filter-control > simple-dimension-filter > control-layout-wrapper > button > div > span.lego-control-section.label > span.main-text > main-section').text
except NoSuchElementException:
print("Test 9 Failed to Execute (Table 3, Page 4)")
return False
test_lines.append(temp)
driver.quit()
def confirm_page():
state = 0
try:
if test_lines[0] != correct_lines[0]:
print("Report Failed to Meet Loading Requirements")
state += 1
except IndexError:
print("Report Failed to Meet Loading Requirements")
return False
try:
for y in range(8):
z = y + 1
if test_lines[z] != correct_lines[z]:
print("Table "+str(z)+" Encountered a Disruption")
state += 1
except IndexError:
print("\nPlease Tend to Faulty Table Listed Above.")
return False
if state == 0:
print("\nPage is Up and Running!\n")
return True
else:
print("\nTotal Number of Errors: " + str(state))
return False
execute_test() # Executes all 9 tests
confirm_page() # Compares results gathered to desired results
|
Python
|
CL
|
b41beba84dae517a6dea8abf2173d5babd7e6923c6ee7c792a0094a38af3a51a
|
"""
Author: Yaan Dalzell
Created: 24/01/2021
License: All contents are the intellectual property of Yaan Dalzell
Not to be used in any manner without written consent
Description: Scrapes data from the given url
Last Valid On 24/01/2021
"""
from selenium import webdriver as web
from selenium.webdriver.firefox.options import Options
import sys
import json
from datetime import datetime
# Define target url
target_url = "http://www.marketindex.com.au/asx-listed-companies"
# Web Driver
options = Options()
options.headless = True
driver = web.Firefox(options = options)
# Set ExtractDate
extract_date_time = str(datetime.now())
driver.get(target_url)
table = driver.find_element_by_tag_name("table")
if table:
# Get header information
header = table.find_element_by_tag_name("thead")
extracted_fields = [field.text for field in header.find_elements_by_tag_name("th")]
# Check that header fields are all present and accounted for
expected_fields = ['Rank', '', 'Code', 'Company', 'Price', '% Chg', 'Market Cap', '1 Year']
# Convert to sets for symmetric difference
extracted_set = set(extracted_fields)
expected_set = set(expected_fields)
if extracted_set.symmetric_difference(expected_set):
print(extracted_set.symmetric_difference(expected_set))
raise Exception('Extracted fields are not as expected. Please investigate')
    # In case the table order has changed (this has happened before)
code_field = extracted_fields.index("Code")
company_field = extracted_fields.index("Company")
price_field = extracted_fields.index("Price")
percent_field = extracted_fields.index("% Chg")
percent_year_field = extracted_fields.index("1 Year")
cap_field = extracted_fields.index("Market Cap")
# Discard Junk fields
keys = extracted_fields[2:]
# Get tables rows (excluding header)
body = table.find_element_by_tag_name("tbody")
rows = body.find_elements_by_tag_name("tr")
outputs = []
for row in rows:
row_values = row.find_elements_by_tag_name("td")
values = [value.text for value in row_values]
temp = {
"ExtractDateTime": extract_date_time,
"CompanyCode": values[code_field],
"CompanyName": values[company_field],
"LastPrice": values[price_field],
"PercentChange": values[percent_field],
"PercentChangeYear": values[percent_year_field],
"MarketCap": values[cap_field]
}
outputs.append(temp)
# Return row data as a string
# for row in rows:
# facts = [fact.text for fact in row.find_elements_by_tag_name("td")]
# print(outputs)
sys.stdout.write("{0}\n".format(json.dumps(outputs, sort_keys=True, indent=5)))
driver.quit()
|
Python
|
CL
|
4fc0f19943ef163c7f7419ac94c720a8b5b26b5e3d46dad54c5d195ada45ad5b
|
from django.shortcuts import render, redirect
from django.http import JsonResponse
from django.core.urlresolvers import reverse
from utils.mixin import LoginRequiredMixin
from django.views.generic import View
from django_redis import get_redis_connection
from apps.goods.models import GoodsSKU

# Create your views here.
# Shopping cart: show the cart page; add items to the cart.
# 1. The cart page requires the user to be logged in first: /cart/
#    LoginRequiredMixin handles redirecting anonymous users to the login page.


class CartInfoView(LoginRequiredMixin, View):
    def get(self, request):
        # Render the cart page.
        # Get the logged-in user.
        user = request.user
        # Fetch the user's cart record from redis: cart_id -> {sku_id: num, sku_id: num, ...}
        con = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        cart_dict = con.hgetall(cart_key)  # hgetall returns a dict {sku_id: num, ...} of product ids and quantities

        skus = []         # Products in the cart
        total_count = 0   # Running total of items, starting at 0
        total_price = 0   # Running total price, starting at 0
        # cart_dict.items() yields the dict's (key, value) pairs as tuples,
        # so unpacking into two variables gives the id and the count directly.
        for sku_id, count in cart_dict.items():
            # Look up the product by its id.
            sku = GoodsSKU.objects.get(id=sku_id)
            # Line subtotal: quantity * unit price.
            amount = sku.price * int(count)
            # Dynamically attach count and amount to the sku, holding the
            # cart quantity and the line subtotal for the template.
            sku.count = count
            sku.amount = amount
            # Append the sku.
            skus.append(sku)
            # Accumulate the item count and the price.
            total_count += int(count)
            total_price += amount

        # Context passed from the backend to the template.
        context = {
            'total_count': total_count,
            'total_price': total_price,
            'skus': skus
        }
        return render(request, 'cart.html', context)


# GET fetches data from the server; POST is for database changes (add, update, delete) -> use POST here.
# This is an ajax request with partial refresh: only the cart refreshes, not the whole page.
# Ajax requests get JSON data back from the backend.
# Parameters passed in: the product id (sku_id) and the quantity (count).
# Matched path: /cart/add
class CartAddView(View):
    # Add an item to the cart.
    def post(self, request):
        # Get the user.
        user = request.user
        if not user.is_authenticated():
            # User is not logged in.
            return JsonResponse({'res': 0, 'errmsg': 'Please log in first'})
        # From here on the user is logged in; the else branch is implied by the early return.

        # Receive the submitted data: sku_id, count.
        sku_id = request.POST.get('sku_id')
        count = request.POST.get('count')

        # Validate the received parameters.
        if not all([sku_id, count]):
            return JsonResponse({'res': 1, 'errmsg': 'Incomplete data'})
        # Data is complete; now validate the product id sku_id.
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'res': 2, 'errmsg': 'Product does not exist'})
        # Validate the quantity.
        try:
            count = int(count)
        except Exception as e:
            return JsonResponse({'res': 3, 'errmsg': 'Invalid product quantity'})
        if count <= 0:
            return JsonResponse({'res': 3, 'errmsg': 'Invalid product quantity'})

        # All inputs are valid; handle the business logic below.
        # If sku_id already exists in the cart, increase its quantity; otherwise add it.
        con = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        # Read the current quantity for sku_id with hget().
        cart_count = con.hget(cart_key, sku_id)
        # If a value comes back for sku_id, the product is already in the cart.
        if cart_count:
            count += int(cart_count)
        # When adding more of a product already in the cart, check the stock.
        if count > sku.stock:
            return JsonResponse({'res': 4, 'errmsg': 'Insufficient stock for this product'})
        # Set the value for sku_id.
        con.hset(cart_key, sku_id, count)
        # Number of distinct entries in the cart.
        cart_count = con.hlen(cart_key)
        # Return a JSON response.
        return JsonResponse({'res': 5, 'cart_count': cart_count, 'msg': 'Added successfully'})


# Operations on the cart page: increase/decrease, update, delete.
# Purely front-end interactions (checking items, select-all) can be done in jQuery
# with no backend call; operations that touch the backend (quantity updates, deletes)
# use ajax requests with partial refresh.
class CartUpdateView(View):
    # Database modification -> POST request.
    def post(self, request):
        user = request.user
        if not user.is_authenticated():
            return JsonResponse({'res': 0, 'errmsg': 'Please log in first'})
        # Receive the parameters.
        sku_id = request.POST.get('sku_id')
        count = request.POST.get('count')
        # Validate the received parameters: completeness, validity.
        if not all([sku_id, count]):
            return JsonResponse({'res': 1, 'errmsg': 'Incomplete data'})
        # Validate the product id.
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'res': 2, 'errmsg': 'Product does not exist'})
        # Validate the quantity.
        try:
            count = int(count)
        except Exception as e:
            return JsonResponse({'res': 3, 'errmsg': 'Invalid product quantity'})
        if count < 0:
            return JsonResponse({'res': 3, 'errmsg': 'Invalid product quantity'})

        # Business logic: update the cart record.
        con = get_redis_connection('default')
        cart_key = 'cart_%d' % user.id
        # Check the stock.
        if count > sku.stock:
            return JsonResponse({'res': 4, 'errmsg': 'Insufficient stock for this product'})
        # Update the product quantity in redis.
        con.hset(cart_key, sku_id, count)
        # Compute the total number of items in the cart.
        total_count = 0
        vals = con.hvals(cart_key)  # hvals takes only the key; passing sku_id here was a bug
        for val in vals:
            total_count += int(val)
        return JsonResponse({'res': 5, 'total_count': total_count, 'msg': 'Updated successfully'})


# Delete a product from the user's cart.
# Required parameter: the product id.
class CartDeleteView(View):
    # Delete from the cart.
    def post(self, request):
        user = request.user
        if not user.is_authenticated():
            return JsonResponse({'res': 0, 'errmsg': 'Please log in first'})
        sku_id = request.POST.get('sku_id')
        if not sku_id:
            return JsonResponse({'res': 1, 'errmsg': 'Incomplete data'})
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except Exception as e:
            return JsonResponse({'res': 2, 'errmsg': 'Product does not exist'})

        # Business logic.
        con = get_redis_connection('default')
        cart_key = "cart_%d" % user.id
        # Remove the entry by product id.
        con.hdel(cart_key, sku_id)
        # Compute the total number of items left in the cart.
        total_count = 0
        vals = con.hvals(cart_key)
        for val in vals:
            total_count += int(val)
        return JsonResponse({'res': 3, 'total_count': total_count, 'msg': 'Deleted successfully'})
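
# A minimal urls.py sketch for wiring these views (the module path
# apps.cart.views is an assumption, inferred from the route comments
# above: /cart/ and /cart/add; old-style url() matches the django.core.urlresolvers era).
from django.conf.urls import url
from apps.cart.views import CartInfoView, CartAddView, CartUpdateView, CartDeleteView

urlpatterns = [
    url(r'^$', CartInfoView.as_view(), name='show'),            # /cart/
    url(r'^add$', CartAddView.as_view(), name='add'),           # /cart/add
    url(r'^update$', CartUpdateView.as_view(), name='update'),  # /cart/update
    url(r'^delete$', CartDeleteView.as_view(), name='delete'),  # /cart/delete
]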
|
Python
|
CL
|
a07579d8466270a5d9c73e46eaf40ecc6ae4605a21850e537a10686eac162475
|
import os

from fastapi import FastAPI
from fastapi_sqlalchemy import DBSessionMiddleware  # middleware helper
from fastapi_sqlalchemy import db  # an object to provide global access to a database session
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from starlette.middleware.cors import CORSMiddleware


class CustomBase(object):
    # Generate __tablename__ automatically
    @declared_attr
    def __tablename__(cls):
        return cls.__name__.lower()


Base = declarative_base(cls=CustomBase)

origins = [
    "http://localhost:3000",
]


def create_app():
    app = FastAPI()
    sqlalchemy_db_env = 'SQLALCHEMY_DATABASE_URI'
    sqlalchemy_db_url = os.environ[sqlalchemy_db_env]
    app.add_middleware(
        DBSessionMiddleware,
        db_url=sqlalchemy_db_url,
    )
    app.add_middleware(
        CORSMiddleware,
        allow_origins=origins,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )
    return app
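
# A minimal usage sketch (assumes SQLALCHEMY_DATABASE_URI is set in the
# environment and uvicorn is installed; the module name "main" is hypothetical):
#   $ SQLALCHEMY_DATABASE_URI=sqlite:///./app.db uvicorn main:app
app = create_app()


@app.get("/health")
def health():
    # Inside a request, fastapi_sqlalchemy exposes the session as db.session.
    return {"status": "ok"}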
|
Python
|
CL
|
16f460adddb4141be229d2448fcc81d67ef85ce2d76b1192285283fba38f8fa3
|
# Python Exercise 107 - Practicing modules in Python
# Create a module called moeda.py with the functions aumentar() (increase), diminuir() (decrease),
# dobro() (double) and metade() (half).
# Also write a program that imports this module and uses some of these functions.


def moeda(preco, moeda='R$'):
    """
    Returns the price formatted as currency
    :param preco: Product price
    :param moeda: The currency symbol used for formatting; Brazilian real by default
    :return: Price formatted as currency
    """
    return f'{moeda}{preco:.2f}'.replace('.', ',')


def aumentar(preco, taxa, formatacao=False):
    """
    Increases the product price by a percentage
    :param preco: Product price
    :param taxa: Percentage to add to the price
    :param formatacao: If True, returns the price formatted with moeda()
    :return: Price increased by taxa
    """
    novoPreco = preco + (preco * taxa / 100)
    return novoPreco if formatacao is False else moeda(novoPreco)


def diminuir(preco, taxa, formatacao=False):
    """
    Decreases the product price by a percentage
    :param preco: Product price
    :param taxa: Percentage to subtract from the price
    :param formatacao: If True, returns the price formatted with moeda()
    :return: Price reduced by taxa
    """
    novoPreco = preco - (preco * taxa / 100)
    return novoPreco if formatacao is False else moeda(novoPreco)


def dobro(preco, formatacao=False):
    """
    Computes double the price
    :param preco: Product price
    :param formatacao: If True, returns the price formatted with moeda()
    :return: Double the price
    """
    novoPreco = preco * 2
    return novoPreco if formatacao is False else moeda(novoPreco)


def metade(preco, formatacao=False):
    """
    Computes half the price
    :param preco: Product price
    :param formatacao: If True, returns the price formatted with moeda()
    :return: Half the price
    """
    novoPreco = preco / 2
    return novoPreco if formatacao is False else moeda(novoPreco)


def resumo(preco, aumento, desconto):
    print('-' * 30)
    print(f'{"Resumo do valor":^30}')
    print('-' * 30)
    print(f'''{"Preço Analisado: ":<20}{moeda(preco):>10}
{"Dobro do preço: ":<20}{dobro(preco, formatacao=True):>10}
{"Metade do preço: ":<20}{metade(preco, formatacao=True):>10}
{aumento}{"% de aumento: ":<18}{aumentar(preco, aumento, formatacao=True):>10}
{desconto}{"% de desconto: ":<18}{diminuir(preco, desconto, formatacao=True):>10}''')
    print('-' * 30)
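
# A short companion program, as the exercise asks (a sketch; assumes the module
# above was saved as moeda.py and is importable from the current directory):
import moeda

p = float(input('Digite o preço: R$'))
moeda.resumo(p, 80, 35)  # summary with an 80% increase and a 35% discount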
|
Python
|
CL
|
1d3efc1e8aebab0669eeb4580b7606eb65cee5da51a604ec89fbc3f9a6966e78
|
# Import generic packages
import time

# Import multi-threading capacity
from multiprocessing import Queue
from threading import Thread, Event

# Import GUI packages
import tkinter as tk
import tkinter.font as tkfont
import tkinter.ttk as ttk


class gui_manager(Thread):
    def __init__(self, outbound_queue, inbound_queue):
        # Class name
        self.__name = 'gui'
        print(self.__name + ' thread - initializing ... ', end='')

        # Class variables
        # Class queues
        self.inbound_q = inbound_queue
        self.outbound_q = outbound_queue

        # Class events
        self._stopped = Event()

        # Class command handlers
        self.command_handlers = {
        }

        print('done.')
        super(gui_manager, self).__init__()

    def run(self):
        print(self.__name + ' thread - starting.')
        # Generate tkinter master window
        self.gui_root = tk.Tk()
        # Instantiate window classes
        self.main_window = main_window(self.gui_root, self.inbound_q, self.outbound_q)
        self.gui_root.after(0, self.run_gui)
        self.gui_root.mainloop()

    def run_gui(self):
        # Gui main loop
        if not self._stopped.is_set():
            if not self.inbound_q.empty():
                src_name, tgt_name, command, payload = self.inbound_q.get()
                if command not in self.command_handlers:
                    print(self.__name + ' thread - No handler for ' + command)
                else:
                    self.command_handlers[command](src_name, tgt_name, command, payload)
            self.gui_root.update()
            self.gui_root.after(0, self.run_gui)
        else:
            # Todo: set all tkinter variables to None on exit
            self.gui_root.destroy()
            self.gui_root.quit()

    def stop(self):
        # Set Stop event
        print(self.__name + ' thread - Shutting down.')
        self._stopped.set()


class main_window:
    def __init__(self, gui_root, inbound_q, outbound_q):
        # Class internal variables
        self.__name = 'gui'
        self.gui_root = gui_root
        self.gui_root.title('Program')
        self.frame = tk.Frame(self.gui_root)
        self.inbound_q = inbound_q
        self.outbound_q = outbound_q

        # Window grid objects
        self._tk_grid_obj = {
            0: {0: None}
        }
        # Window grid variables
        self._tk_var_obj = {
            0: {0: 'Stop Main Program'}
        }

        # ========================================== GRID OBJECTS ========================================== #
        # ============================================== ROW 0 ============================================= #
        self._tk_grid_obj[0][0] = tk.Button(
            self.gui_root,
            text=self._tk_var_obj[0][0],
            command=self.button_stop_main
        )
        self._tk_grid_obj[0][0].grid(row=0, column=0, padx=5, pady=5,
                                     sticky=('N', 'W', 'E', 'S'))

    # ========================================== Button Commands ========================================== #
    def button_stop_main(self):
        self.outbound_q.put((self.__name, 'master', 'stop_all', None))
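
# A minimal usage sketch (an assumption about the surrounding program, which is
# not shown here): the master thread owns both queues and waits for 'stop_all'.
if __name__ == '__main__':
    from queue import Queue as _Queue  # a thread-safe queue is enough for a demo
    gui_to_master, master_to_gui = _Queue(), _Queue()
    gui = gui_manager(gui_to_master, master_to_gui)
    gui.start()
    src, tgt, command, payload = gui_to_master.get()  # blocks until the button is pressed
    if command == 'stop_all':
        gui.stop()
        gui.join()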
|
Python
|
CL
|
ae99aeb4ef8d1e85f1ec14ab492d706cf2cb64fa243f760dbd3e87c3f2548648
|
#!/usr/bin/python3
# Signature of tf.contrib.seq2seq.sequence_loss (TensorFlow 1.x): computes the
# weighted cross-entropy loss for a sequence of logits.
import tensorflow as tf

loss = tf.contrib.seq2seq.sequence_loss(
    logits,    # float Tensor of shape [batch_size, sequence_length, num_decoder_symbols]
    targets,   # int Tensor of shape [batch_size, sequence_length]
    weights,   # float Tensor of shape [batch_size, sequence_length]
    average_across_timesteps=True,
    average_across_batch=True,
    sum_over_timesteps=False,
    sum_over_batch=False,
    softmax_loss_function=None,
    name=None
)
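
# A runnable sketch under TF 1.x assumptions (tf.contrib was removed in TF 2.x):
import numpy as np

batch, steps, vocab = 2, 5, 7
logits = tf.constant(np.random.randn(batch, steps, vocab), dtype=tf.float32)
targets = tf.constant(np.random.randint(0, vocab, size=(batch, steps)), dtype=tf.int32)
weights = tf.ones([batch, steps], dtype=tf.float32)  # mask: 1.0 for real tokens, 0.0 for padding

loss = tf.contrib.seq2seq.sequence_loss(logits, targets, weights)
with tf.Session() as sess:
    print(sess.run(loss))  # a scalar: mean cross-entropy over batch and time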
|
Python
|
CL
|
a169de2eba8e939fb632f7b744f5fd8e76700420c3a3733482e21146d7696457
|
#!/usr/bin/env python3
#encoding=utf-8
#----------------------------------
# Usage: python3 4-getattr-v-getattribute.py
# Description: compare the __getattr__ and __getattribute__
#----------------------------------
'''
To summarize the coding differences between __getattr__ and __getattribute__,
the following example uses both to implement three attributes -- attr1 is a
class attribute, attr2 is an instance attribute, and attr3 is a virtual managed
attribute computed when fetched
'''


class GetAttr:
    attr1 = 1

    def __init__(self):
        self.attr2 = 2

    def __getattr__(self, attr):        # On undefined attrs only
        print('get: ' + attr)           # Not on attr1: inherited from class
        if attr == 'attr3':             # Not on attr2: stored on instance
            return 3
        else:
            raise AttributeError(attr)


class GetAttribute(object):             # (object) needed in 2.X only
    attr1 = 1

    def __init__(self):
        self.attr2 = 2

    def __getattribute__(self, attr):   # On all attr fetches
        print('get: ' + attr)           # Use superclass to avoid looping here
        if attr == 'attr3':
            return 3
        else:
            return object.__getattribute__(self, attr)


if __name__ == '__main__':
    # test for __getattr__
    print('The result of __getattr__')
    X = GetAttr()
    print(X.attr1)
    print(X.attr2)
    print(X.attr3)
    print()
    print('-'*20)

    # test for __getattribute__
    print('The result of __getattribute__')
    X = GetAttribute()
    print(X.attr1)
    print(X.attr2)
    print(X.attr3)
    print(X.attr4)    # Undefined: raises AttributeError after printing 'get: attr4'
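
# Expected output (derived by tracing the code above; the final fetch of attr4
# ends the run with an AttributeError):
#   The result of __getattr__
#   1
#   2
#   get: attr3
#   3
#
#   --------------------
#   The result of __getattribute__
#   get: attr1
#   1
#   get: attr2
#   2
#   get: attr3
#   3
#   get: attr4
#   AttributeError: attr4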
|
Python
|
CL
|
6ac6522f1a1d822a8bee8cdb1e61945b7fd684b5f3455fbd062e350fb8e0c27f
|
from abc import ABC, abstractmethod


class Env(ABC):
    """
    An environment that an agent can interact with.
    """
    def __init__(self):
        """
        Creates the environment and allows an agent to interact with it.

        Properties:
            state_space (tuple): The dimensions of the input state.
            action_space (tuple): The dimensions of the output actions.
            state (object): The current state of the environment.
            reward (float): The reward of the last action taken in the
                environment.
            terminal (bool): True if the current state is a terminal state.
            truncated (bool): True if the current episode was truncated.
            info (object): Any additional information of the environment.
        """
        # The current information for the environment
        self.state_space = ()
        self.action_space = ()
        self.state = None
        self.reward = 0
        self.terminal = False
        self.truncated = False
        self.info = None

    @property
    def done(self):
        """
        Returns True if the environment episode is finished.
        """
        return self.terminal or self.truncated

    @abstractmethod
    def step(self, action):
        """
        Takes 1 step into the environment using the given action.

        Args:
            action (object): The action to take in the environment.

        Returns:
            A (next state, reward, terminal, info) tuple.
        """
        raise NotImplementedError

    def render(self):
        """
        If applicable, the environment will render to the screen.
        """
        raise NotImplementedError

    def sample_action(self):
        """
        Samples an action from the environment action space.
        """
        raise NotImplementedError

    def n_steps(self, actions):
        """
        Takes multiple steps into the environment, one per given action.

        Args:
            actions (iterable): The actions to take in the environment.

        Returns:
            An array of (next state, reward, terminal, info) tuples.
        """
        return [self.step(action) for action in actions]

    @abstractmethod
    def reset(self):
        """
        Resets the environment.
        """
        raise NotImplementedError
Python
|
CL
|
31af4f8f6ef142efc1012454ec5d3c987e3436bec2f29c741ffc870ee74f0758
|
import pandas as pd
import newspaper
from newspaper import Article
import torch
import numpy as np
from transformers import BertTokenizer
from transformers import BertForSequenceClassification
from textblob import TextBlob
from urllib.parse import urlparse

label_dict = {'center': 5,
              'left': 2,
              'leftcenter': 1,
              'nan': 4,
              'right': 0,
              'right-center': 3}
inverse_dic = {
    5: 'center',
    2: 'left',
    1: 'leftcenter',
    4: 'nan',
    0: 'right',
    3: 'right-center'
}


def load_model_tokeniser():
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                              do_lower_case=True)
    model = BertForSequenceClassification.from_pretrained("bert-base-uncased",
                                                          num_labels=len(label_dict),
                                                          output_attentions=False,
                                                          output_hidden_states=False)
    device = torch.device('cpu')
    model.to(device)
    model.load_state_dict(torch.load('app/finetuned_BERT_epoch_5.model', map_location=torch.device('cpu')))
    return model, tokenizer


def newspaper_extract(model, tokenizer, url):
    article = Article(url)
    article.download()
    article.parse()
    authors = article.authors
    date = article.publish_date
    text = article.text
    article.nlp()
    summary = article.summary
    title = article.title
    parsed_uri = urlparse(url)
    source_url = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)
    if text and summary:
        inputs = tokenizer(text, padding='max_length', return_tensors='pt', truncation=True)
        # print(len(inputs))
        model.eval()
        with torch.no_grad():
            outputs = model(**inputs)
        pred_labels = np.argmax(outputs[0].cpu().detach().numpy(), axis=1).tolist()
        bias = inverse_dic[pred_labels[0]]
        # Post is the data you want to enter into the database. Not specifying an ID generates a random one
        sentiment = TextBlob(summary).sentiment
        return date, summary, title, authors, bias, sentiment.polarity, sentiment.subjectivity, source_url, text
    return None, None, None, None, None, None, None, None, None
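
# A minimal usage sketch (the URL is a placeholder; assumes the fine-tuned
# checkpoint app/finetuned_BERT_epoch_5.model is present on disk):
if __name__ == '__main__':
    model, tokenizer = load_model_tokeniser()
    fields = newspaper_extract(model, tokenizer, 'https://example.com/some-article')
    date, summary, title, authors, bias, polarity, subjectivity, source_url, text = fields
    print(title, bias, polarity, subjectivity)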
|
Python
|
CL
|
ba90bd86eaa017be897527cac85802790c996490894987dbec6a3700413d142e
|
from pygame.rect import Rect
from pyplatformerengine.physics.CollisionDetectionFactory import CollisionDetectionFactory

"""
A basic physics component for moving a character around.
"""


class MotionlessPhysicsComponent:

    """
    Initializes the object.
    """
    def __init__(self, _id, desc):
        self._id = _id
        self.desc = desc

    """
    Sets up the object.
    """
    def setUp(self, actor, entity):
        entity.rect = Rect(actor.stateDict["startPositionX"], actor.stateDict["startPositionY"], 1, 1)
        if actor.stateDict["collisionEnabled"] != 0:
            collisionDetectionFactory = CollisionDetectionFactory()
            collisionDetectionFactory.addCollidable(actor, entity)

    """
    Runs the update to the logic component.
    """
    def update(self, actor, entity):
        self.updateLocation(entity)

    """
    Updates the logic location of the entity.
    """
    def updateLocation(self, entity):
        entity.rect.x += entity.deltaX
        entity.rect.y += entity.deltaY
|
Python
|
CL
|
40540bebe8d6b93c747d085a98dffe5850f39139db56b401cc7b27c03e608d53
|
import numpy as np
from hamiltonians import hamiltonian_two_sites
from general_functions import (compute_eigensystem, compute_adiabatic_parameter, compute_parameters_interpolation, compute_period,
                               solve_system_unpack, sort_solution)
import matplotlib.pyplot as plt
import concurrent.futures
from scipy.constants import h, e
from tqdm import tqdm
import time as timer
from multiprocessing import Pool

workers = 8

hbar = ((h / e) / (2 * np.pi)) * 10 ** 6 * 10 ** 9  # Value for the reduced Planck constant [ueV * ns]

J = 10
U = 22.3 * J

n_Delta = 2 ** 15 + 1  # This number of elements is required for the romb method of integration used below
limit_Delta = 66.7
Delta_vector = np.linspace(limit_Delta, -limit_Delta, n_Delta, endpoint=True) * J

parameters = [Delta_vector, U, J]  # List with the parameters of the system
labels = [r'$(\epsilon+u)/E_Z$', r'$E_n/J$']  # Labels of the figure

# Compute and plot the eigenenergies of the system
energies, states = compute_eigensystem(parameters, hamiltonian_two_sites, plot=False)

partial_hamiltonian = np.zeros([n_Delta, 3, 3], dtype=complex)
partial_hamiltonian[:, 2, 2] = -1
partial_hamiltonian[:, 0, 0] = 1

factors, c_tilde = compute_adiabatic_parameter(Delta_vector, states, energies, 0, hbar=hbar, partial_Hamiltonian=partial_hamiltonian)

# Solve the ODE to obtain the dependency of eps on the parameter s
s, Delta_sol = compute_parameters_interpolation(Delta_vector, factors, c_tilde, method_1=False)  # parameters = [Delta_sol, U, J]

T = compute_period(Delta_sol, hamiltonian_two_sites, parameters, hbar, index=0, state=0)

n_tf = 400
tf_vec = np.linspace(0.1 * hbar / J, 26 * hbar / J, n_tf)

n_t = 10 ** 3

density0 = np.zeros([3, 3], dtype=complex)  # Initialize the variable to save the density matrix
density0[2, 2] = 1  # Initially the only state populated is the triplet (in our basis the first state)

args = []
for i in range(0, n_tf):
    time = np.linspace(0, tf_vec[i], n_t)  # Time vector in which to compute the solution of the population
    temp = [i, time, density0, [Delta_sol, U, J], hamiltonian_two_sites,
            {'normalization': tf_vec[i], 'atol': 1e-8, 'rtol': 1e-6, 'hbar': hbar}]  # List of parameters and default parameters as a dict
    args.append(temp)

if __name__ == '__main__':
    start = timer.perf_counter()

    results_list = []  # Empty list in which to save the async results
    pbar = tqdm(total=n_tf, desc='Processing', ncols=90, bar_format='{l_bar}{bar}{r_bar}')
    with concurrent.futures.ProcessPoolExecutor(max_workers=workers) as executor:
        results = executor.map(solve_system_unpack, args)
        for result in results:
            results_list.append(result)
            pbar.update()

    # pool = Pool()
    # for i, result in enumerate(pool.imap_unordered(solve_system_unpack, args), 1):  # Iterate over all the desired parameters
    #     results_list.append(result)  # Save the result
    #     pbar.update()  # Update the progress bar
    # pool.terminate()  # Terminate the pool

    # for i in range(0, n_tf):
    #     result = solve_system_unpack(args[i])
    #     results_list.append(result)
    #     pbar.update()

    final = timer.perf_counter()
    print('\nThe computation took {} s'.format(final - start))

    results_sort = sort_solution(results_list)  # Sort the async results

    fidelity = np.zeros(n_tf)
    for i in range(0, n_tf):
        fidelity[i] = results_sort[i][-1, 0]  # Data for final time i, last time step computed (-1), and the state S(1,1) (index 0)

    # Find the first local maximum of the fidelity curve
    maximum = False
    counter = 0
    while not maximum:
        if fidelity[counter] > fidelity[counter + 1]:
            index_max = counter
            maximum = True
        else:
            counter += 1

    plt.plot(tf_vec * J / hbar, fidelity)
    for i in range(0, 5):
        plt.vlines((tf_vec[index_max] + T * i) * J / hbar, 0, 1, linestyle='--')
|
Python
|
CL
|
8835007c78d1437b05ca1b111e19c058da6201c7565b12ed55cb6d134f864db0
|
import requests
import time
import tempfile

from .config import url_request, url_response, app_key
from .errors import RuCaptchaError


class RotateCaptcha:
    def __init__(self, rucaptcha_key, sleep_time=5):
        '''
        Initialises the required variables and creates a folder for images and
        cache. When the work is done, the temporary files and folders are removed.
        :param rucaptcha_key: The RuCaptcha API key from the user account
        :param sleep_time: Time to wait for the captcha to be solved
        '''
        self.RUCAPTCHA_KEY = rucaptcha_key
        self.sleep_time = sleep_time

    # Captcha handling
    def captcha_handler(self, captcha_link):
        '''
        Takes a link to an image from you, downloads it, sends the image to the
        RuCaptcha server, waits for the captcha to be solved and returns the result.
        :param captcha_link: Link to the image
        :return: The captcha answer
        '''
        # Download the image
        content = requests.get(captcha_link).content

        with tempfile.NamedTemporaryFile(suffix='.jpg') as out:
            out.write(content)
            captcha_image = open(out.name, 'rb')

        # Send the image as a file
        files = {'file': captcha_image}
        # Build the payload: the site key, the method, and a request for a JSON response
        payload = {"key": self.RUCAPTCHA_KEY,
                   "method": "rotatecaptcha",
                   "json": 1,
                   "soft_id": app_key}

        # Send the captcha image and the other parameters to RuCaptcha;
        # the JSON response contains the id of the captcha being solved, which we extract
        captcha_id = requests.request('POST',
                                      url_request,
                                      data=payload,
                                      files=files).json()

        if captcha_id['status'] == 0:  # '== 0', not 'is 0': identity checks on ints are unreliable
            return RuCaptchaError(captcha_id['request'])
        captcha_id = captcha_id['request']

        # Wait for the captcha to be solved
        time.sleep(self.sleep_time)
        while True:
            # Request the result; if the captcha is not solved yet, wait and retry
            payload = {'key': self.RUCAPTCHA_KEY,
                       'action': 'get',
                       'id': captcha_id,
                       'json': 1,
                       }
            captcha_response = requests.post(url_response, data=payload)
            if captcha_response.json()['request'] == 'CAPCHA_NOT_READY':
                time.sleep(self.sleep_time)
            elif captcha_response.json()["status"] == 0:
                return RuCaptchaError(captcha_response.json()["request"])
            elif captcha_response.json()["status"] == 1:
                return captcha_response.json()['request']
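
# A minimal usage sketch (the API key and image URL are placeholders; note the
# module uses relative imports, so it must run as part of its package):
# solver = RotateCaptcha(rucaptcha_key='YOUR_RUCAPTCHA_KEY')
# answer = solver.captcha_handler(captcha_link='http://example.com/captcha.jpg')
# print(answer)  # the solved answer on success, or a RuCaptchaError instance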
|
Python
|
CL
|
f8f609b18750bf29e97d2a1463ecf42e737b4891bf7015eab30c75a94fbc9366
|