content
stringlengths 0
1.05M
| origin
stringclasses 2
values | type
stringclasses 2
values |
|---|---|---|
"""Command line tools for Flask server app."""
from os import environ
from uuid import UUID
from flask_script import Manager
from flask_migrate import MigrateCommand, upgrade
from app import create_app, db
from app.mongo import drop_mongo_collections
from app.authentication.models import User, PasswordAuthentication, OrganizationMembership
from app.samples.sample_models import Sample
from app.sample_groups.sample_group_models import SampleGroup
# Application and CLI wiring: flask_script Manager exposes the @manager.command
# functions below; the 'db' subcommand group comes from Flask-Migrate.
app = create_app()
manager = Manager(app)  # pylint: disable=invalid-name
manager.add_command('db', MigrateCommand)
# These must be imported AFTER Mongo connection has been established during app creation
# pylint: disable=wrong-import-position
from seed import create_abrf_analysis_result as create_abrf_result
from seed.fuzz import generate_metadata, create_saved_group
# pylint: enable=wrong-import-position
@manager.command
def recreate_db():
    """Recreate a database using migrations.

    Drops every table in the public schema (Postgres-specific), re-runs all
    Alembic migrations, and empties the Mongo collections.
    """
    # We cannot simply use db.drop_all() because it will not drop the alembic_versions table
    # Ask pg_tables to generate one 'drop table ... cascade;' statement per table.
    sql = 'SELECT \
        \'drop table if exists "\' || tablename || \'" cascade;\' as pg_drop \
        FROM \
        pg_tables \
        WHERE \
        schemaname=\'public\';'
    drop_statements = db.engine.execute(sql)
    if drop_statements.rowcount > 0:
        # Concatenate the generated DROP statements and execute them in one call.
        # NOTE(review): when rowcount == 0 the result proxy is never closed —
        # presumably harmless here, but worth confirming.
        drop_statement = '\n'.join([x['pg_drop'] for x in drop_statements])
        drop_statements.close()
        db.engine.execute(drop_statement)
    # Run migrations
    upgrade()
    # Empty Mongo database
    drop_mongo_collections()
def get_user():
    """Build the default seed user from environment variables (with fallbacks).

    Returns an unsaved ``User`` instance with password authentication attached.
    """
    seed_user = User(
        username=environ.get('SEED_USER_USERNAME', 'bchrobot'),
        email=environ.get('SEED_USER_EMAIL', 'benjamin.blair.chrobot@gmail.com'),
        user_type='user',
    )
    seed_password = environ.get('SEED_USER_PASSWORD', 'Foobar22')
    seed_user.password_authentication = PasswordAuthentication(password=seed_password)
    return seed_user
@manager.command
def seed_users():
    """Seed just the users for the database."""
    default_user = get_user()
    db.session.add(default_user)
    db.session.commit()
@manager.command
def seed_db():
    """Seed the database.

    Creates the default user, the MasonLab organization (with the default
    user as admin), the ABRF 2017 sample group with two samples, and a
    fuzzed sample group.
    """
    default_user = get_user()

    # Create Mason Lab
    mason_lab = User(
        username='MasonLab',
        email='benjamin.blair.chrobot+masonlab@gmail.com',
        user_type='organization',
    )
    # Attach the default user to the organization as an admin.
    membership = OrganizationMembership(role='admin')
    membership.user = default_user
    mason_lab.user_memberships.append(membership)
    db.session.add_all([mason_lab, membership])
    db.session.commit()

    # Create ABRF sample group
    # Fixed UUID so repeated seeds / external tooling can reference the group.
    abrf_uuid = UUID('00000000-0000-4000-8000-000000000000')
    abrf_description = 'ABRF San Diego Mar 24th-29th 2017'
    abrf_2017_group = SampleGroup(name='ABRF 2017',
                                  owner_uuid=mason_lab.uuid,
                                  owner_name=mason_lab.username,
                                  is_library=True,
                                  analysis_result=create_abrf_result(save=True),
                                  description=abrf_description)
    abrf_2017_group.uuid = abrf_uuid
    # Samples are Mongo documents: .save() persists them immediately.
    abrf_sample_01 = Sample(name='SomethingUnique_A',
                            library_uuid=abrf_uuid,
                            analysis_result=create_abrf_result(save=True),
                            metadata=generate_metadata()).save()
    abrf_sample_02 = Sample(name='SomethingUnique_B',
                            library_uuid=abrf_uuid,
                            analysis_result=create_abrf_result(save=True),
                            metadata=generate_metadata()).save()
    abrf_2017_group.samples = [abrf_sample_01, abrf_sample_02]
    db.session.add(abrf_2017_group)
    db.session.commit()

    # Create fuzzed group
    fuzz_uuid = UUID('00000000-0000-4000-8000-000000000001')
    create_saved_group(owner=mason_lab, uuid=fuzz_uuid)
if __name__ == '__main__':
    # Dispatch to the flask_script CLI (recreate_db, seed_users, seed_db, db ...).
    manager.run()
|
nilq/baby-python
|
python
|
import torch
from transformers import BertTokenizerFast
from colbert.modeling.tokenization.utils import _split_into_batches, _sort_by_length
class DocTokenizer():
    """BERT-based tokenizer for ColBERT document passages.

    Prepends a [D] marker token (mapped onto BERT's [unused1] slot) to every
    passage so documents are distinguished from queries at encoding time.
    """

    def __init__(self, doc_maxlen):
        self.tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
        self.doc_maxlen = doc_maxlen

        # Reuse an unused BERT vocab entry as the document marker.
        self.D_marker_token, self.D_marker_token_id = '[D]', self.tok.get_vocab()['[unused1]']
        self.cls_token, self.cls_token_id = self.tok.cls_token, self.tok.cls_token_id
        self.sep_token, self.sep_token_id = self.tok.sep_token, self.tok.sep_token_id

        assert self.D_marker_token_id == 2

    def tokenize(self, batch_text, add_special_tokens=False):
        """Tokenize a batch of strings; optionally wrap with [CLS] [D] ... [SEP]."""
        assert type(batch_text) in [list, tuple], (type(batch_text))

        token_lists = [self.tok.tokenize(text, add_special_tokens=False) for text in batch_text]
        if not add_special_tokens:
            return token_lists

        head, tail = [self.cls_token, self.D_marker_token], [self.sep_token]
        return [head + lst + tail for lst in token_lists]

    def encode(self, batch_text, add_special_tokens=False):
        """Encode a batch of strings to id lists; optionally add special-token ids."""
        assert type(batch_text) in [list, tuple], (type(batch_text))

        id_lists = self.tok(batch_text, add_special_tokens=False)['input_ids']
        if not add_special_tokens:
            return id_lists

        head, tail = [self.cls_token_id, self.D_marker_token_id], [self.sep_token_id]
        return [head + lst + tail for lst in id_lists]

    def tensorize(self, batch_text, bsize=None):
        """Encode to padded tensors; if bsize is given, return length-sorted batches
        plus the indices needed to restore the original order."""
        assert type(batch_text) in [list, tuple], (type(batch_text))

        # add placehold for the [D] marker
        batch_text = ['. ' + x for x in batch_text]

        encoding = self.tok(batch_text, padding='longest', truncation='longest_first',
                            return_tensors='pt', max_length=self.doc_maxlen)
        ids, mask = encoding['input_ids'], encoding['attention_mask']

        # postprocess for the [D] marker: overwrite the placeholder token
        ids[:, 1] = self.D_marker_token_id

        if bsize:
            ids, mask, reverse_indices = _sort_by_length(ids, mask, bsize)
            batches = _split_into_batches(ids, mask, bsize)
            return batches, reverse_indices

        return ids, mask
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
import numpy
# Default tolerances for comparing cell lengths (Angstrom) and angles
# (cosine values) when identifying the Bravais lattice.
_default_epsilon_length = 1e-5
_default_epsilon_angle = 1e-5
def change_reference(reciprocal_cell, kpoints, to_cartesian=True):
    """
    Change reference system, from cartesian to crystal coordinates (units of b1,b2,b3) or viceversa.
    :param reciprocal_cell: a 3x3 array representing the cell lattice vectors in reciprocal space
    :param kpoints: a list of (3) point coordinates
    :param to_cartesian: if True convert crystal -> cartesian, otherwise the inverse
    :return kpoints: a list of (3) point coordinates in the new reference
    """
    if not isinstance(kpoints, numpy.ndarray):
        raise ValueError('kpoints must be a numpy.array')

    cell_transposed = numpy.array(reciprocal_cell).T
    matrix = cell_transposed if to_cartesian else numpy.linalg.inv(cell_transposed)

    # kpoints is Nx3 while matrix is 3x3: transpose kpoints to 3xN, apply the
    # transformation, then transpose back to Nx3.
    return numpy.dot(matrix, kpoints.T).T
def analyze_cell(cell=None, pbc=None):
    """
    A function executed by the __init__ or by set_cell.
    If a cell is set, properties like a1, a2, a3, cosalpha, reciprocal_cell are
    set as well, although they are not stored in the DB.
    :note: units are Angstrom for the cell parameters, 1/Angstrom for the
    reciprocal cell parameters.
    """
    pbc = [True, True, True] if pbc is None else pbc
    dimension = sum(pbc)

    if cell is None:
        return {
            'reciprocal_cell': None,
            'dimension': dimension,
            'pbc': pbc
        }

    the_cell = numpy.array(cell)
    # Reciprocal lattice: 2*pi * (cell^-1)^T; its rows are b1, b2, b3.
    reciprocal_cell = 2. * numpy.pi * numpy.linalg.inv(the_cell).transpose()

    # Real-space lattice vectors (rows of the cell), units = Angstrom.
    a1, a2, a3 = (numpy.array(the_cell[row, :]) for row in range(3))
    # Lattice parameter lengths, units = Angstrom.
    a, b, c = (numpy.linalg.norm(vec) for vec in (a1, a2, a3))
    # Reciprocal lattice vectors, units = 1/Angstrom.
    b1, b2, b3 = (reciprocal_cell[row, :] for row in range(3))

    return {
        'a1': a1,
        'a2': a2,
        'a3': a3,
        'a': a,
        'b': b,
        'c': c,
        'b1': b1,
        'b2': b2,
        'b3': b3,
        # Cosines of the cell angles (textbook alpha/beta/gamma convention).
        'cosalpha': numpy.dot(a2, a3) / b / c,
        'cosbeta': numpy.dot(a3, a1) / c / a,
        'cosgamma': numpy.dot(a1, a2) / a / b,
        'dimension': dimension,
        'reciprocal_cell': reciprocal_cell,
        'pbc': pbc,
    }
def get_explicit_kpoints_path(value=None, cell=None, pbc=None, kpoint_distance=None, cartesian=False,
                              epsilon_length=_default_epsilon_length,
                              epsilon_angle=_default_epsilon_angle):
    """
    Set a path of kpoints in the Brillouin zone.
    :param value: description of the path, in various possible formats.
        None: automatically sets all irreducible high symmetry paths.
        Requires that a cell was set
        or::
            [('G','M'), (...), ...]
            [('G','M',30), (...), ...]
            [('G',(0,0,0),'M',(1,1,1)), (...), ...]
            [('G',(0,0,0),'M',(1,1,1),30), (...), ...]
    :param cell: 3x3 array representing the structure cell lattice vectors
    :param pbc: 3-dimensional array of booleans signifying the periodic boundary
        conditions along each lattice vector
    :param float kpoint_distance: parameter controlling the distance between
        kpoints. Distance is given in crystal coordinates, i.e. the distance
        is computed in the space of b1,b2,b3. The distance set will be the
        closest possible to this value, compatible with the requirement of
        putting equispaced points between two special points (since extrema
        are included).
    :param bool cartesian: if set to true, reads the coordinates eventually
        passed in value as cartesian coordinates. Default: False.
    :param float epsilon_length: threshold on lengths comparison, used
        to get the bravais lattice info. It has to be used if the
        user wants to be sure the right symmetries are recognized.
    :param float epsilon_angle: threshold on angles comparison, used
        to get the bravais lattice info. It has to be used if the
        user wants to be sure the right symmetries are recognized.
    :raises ValueError: if the input format is not recognized, a segment has
        fewer than two points, or its endpoints coincide.
    :returns: point_coordinates, path, bravais_info, explicit_kpoints, labels
        (the zero-dimensional case returns only ``[[0,0,0]], None,
        bravais_info`` -- kept as-is for backward compatibility)
    """
    bravais_info = find_bravais_info(
        cell=cell, pbc=pbc,
        epsilon_length=epsilon_length,
        epsilon_angle=epsilon_angle
    )
    analysis = analyze_cell(cell, pbc)
    dimension = analysis['dimension']
    reciprocal_cell = analysis['reciprocal_cell']
    pbc = list(analysis['pbc'])

    if dimension == 0:
        # case with zero dimension: only gamma-point is set
        return [[0., 0., 0.]], None, bravais_info

    def _is_path_1(path):
        # Format [('G','M'), (...), ...]: every element is a pair of string labels.
        try:
            are_two = all([len(i) == 2 for i in path])
            if not are_two:
                return False
            for i in path:
                are_str = all([isinstance(b, str) for b in i])
                if not are_str:
                    return False
        except IndexError:
            return False
        return True

    def _is_path_2(path):
        # Format [('G','M',30), (...), ...]: label pair plus number of points.
        try:
            are_three = all([len(i) == 3 for i in path])
            if not are_three:
                return False
            are_good = all([all([isinstance(b[0], str),
                                 isinstance(b[1], str),
                                 isinstance(b[2], int)])
                            for b in path])
            if not are_good:
                return False
            # check that at least two points per segment (beginning and end)
            points_num = [int(i[2]) for i in path]
            if any([i < 2 for i in points_num]):
                raise ValueError('Must set at least two points per path '
                                 'segment')
        except IndexError:
            return False
        return True

    def _is_path_3(path):
        # [('G',(0,0,0),'M',(1,1,1)), (...), ...]
        try:
            _ = len(path)
            are_four = all([len(i) == 4 for i in path])
            if not are_four:
                return False
            have_labels = all(all([isinstance(i[0], str), isinstance(i[2], str)]) for i in path)
            if not have_labels:
                return False
            for i in path:
                coord1 = [float(j) for j in i[1]]
                coord2 = [float(j) for j in i[3]]
                if len(coord1) != 3 or len(coord2) != 3:
                    return False
        except (TypeError, IndexError):
            return False
        return True

    def _is_path_4(path):
        # [('G',(0,0,0),'M',(1,1,1),30), (...), ...]
        try:
            _ = len(path)
            are_five = all([len(i) == 5 for i in path])
            if not are_five:
                return False
            have_labels = all(all([isinstance(i[0], str), isinstance(i[2], str)]) for i in path)
            if not have_labels:
                return False
            have_points_num = all([isinstance(i[4], int) for i in path])
            if not have_points_num:
                return False
            # check that at least two points per segment (beginning and end)
            points_num = [int(i[4]) for i in path]
            if any([i < 2 for i in points_num]):
                raise ValueError('Must set at least two points per path '
                                 'segment')
            for i in path:
                coord1 = [float(j) for j in i[1]]
                coord2 = [float(j) for j in i[3]]
                if len(coord1) != 3 or len(coord2) != 3:
                    return False
        except (TypeError, IndexError):
            return False
        return True

    def _num_points_from_coordinates(path, point_coordinates, kpoint_distance=None):
        # NOTE: this way of creating intervals ensures equispaced objects
        # in crystal coordinates of b1,b2,b3
        distances = [numpy.linalg.norm(numpy.array(point_coordinates[i[0]]) -
                                       numpy.array(point_coordinates[i[1]])
                                       ) for i in path]
        if kpoint_distance is None:
            # Use max_points_per_interval as the default guess for automatically
            # guessing the number of points
            max_point_per_interval = 10
            max_interval = max(distances)
            try:
                points_per_piece = [max(2, max_point_per_interval * i // max_interval) for i in distances]
            except (ValueError, ZeroDivisionError):
                # ZeroDivisionError occurs when every segment has zero length
                # (max_interval == 0); re-raise it with the intended message.
                raise ValueError('The beginning and end of each segment in the '
                                 'path should be different.')
        else:
            points_per_piece = [max(2, int(distance // kpoint_distance)) for distance in distances]
        return points_per_piece

    if cartesian:
        if cell is None:
            raise ValueError('To use cartesian coordinates, a cell must '
                             'be provided')

    if kpoint_distance is not None:
        if kpoint_distance <= 0.:
            raise ValueError('kpoints_distance must be a positive float')

    # Dispatch on the recognized input format; each branch produces
    # `path` (list of label pairs), `point_coordinates` (label -> coords in
    # crystal units) and `num_points` (points per segment).
    if value is None:
        if cell is None:
            raise ValueError('Cannot set a path not even knowing the '
                             'kpoints or at least the cell')
        point_coordinates, path, bravais_info = get_kpoints_path(
            cell=cell, pbc=pbc, cartesian=cartesian,
            epsilon_length=epsilon_length,
            epsilon_angle=epsilon_angle)
        num_points = _num_points_from_coordinates(path, point_coordinates, kpoint_distance)

    elif _is_path_1(value):
        # in the form [('X','M'),(...),...]
        if cell is None:
            raise ValueError('Cannot set a path not even knowing the '
                             'kpoints or at least the cell')
        path = value
        point_coordinates, _, bravais_info = get_kpoints_path(
            cell=cell, pbc=pbc, cartesian=cartesian,
            epsilon_length=epsilon_length,
            epsilon_angle=epsilon_angle)
        num_points = _num_points_from_coordinates(path, point_coordinates, kpoint_distance)

    elif _is_path_2(value):
        # [('G','M',30), (...), ...]
        if cell is None:
            raise ValueError('Cannot set a path not even knowing the '
                             'kpoints or at least the cell')
        path = [(i[0], i[1]) for i in value]
        point_coordinates, _, bravais_info = get_kpoints_path(
            cell=cell, pbc=pbc, cartesian=cartesian,
            epsilon_length=epsilon_length,
            epsilon_angle=epsilon_angle)
        num_points = [i[2] for i in value]

    elif _is_path_3(value):
        # [('G',(0,0,0),'M',(1,1,1)), (...), ...]
        path = [(i[0], i[2]) for i in value]
        point_coordinates = {}
        for piece in value:
            if piece[0] in point_coordinates:
                if point_coordinates[piece[0]] != piece[1]:
                    raise ValueError('Different points cannot have the same label')
            else:
                if cartesian:
                    point_coordinates[piece[0]] = change_reference(
                        reciprocal_cell,
                        numpy.array([piece[1]]),
                        to_cartesian=False)[0]
                else:
                    point_coordinates[piece[0]] = piece[1]
            if piece[2] in point_coordinates:
                if point_coordinates[piece[2]] != piece[3]:
                    raise ValueError('Different points cannot have the same label')
            else:
                if cartesian:
                    point_coordinates[piece[2]] = change_reference(
                        reciprocal_cell,
                        numpy.array([piece[3]]),
                        to_cartesian=False)[0]
                else:
                    point_coordinates[piece[2]] = piece[3]
        num_points = _num_points_from_coordinates(path, point_coordinates, kpoint_distance)

    elif _is_path_4(value):
        # [('G',(0,0,0),'M',(1,1,1),30), (...), ...]
        path = [(i[0], i[2]) for i in value]
        point_coordinates = {}
        for piece in value:
            if piece[0] in point_coordinates:
                if point_coordinates[piece[0]] != piece[1]:
                    raise ValueError('Different points cannot have the same label')
            else:
                if cartesian:
                    point_coordinates[piece[0]] = change_reference(
                        reciprocal_cell,
                        numpy.array([piece[1]]),
                        to_cartesian=False)[0]
                else:
                    point_coordinates[piece[0]] = piece[1]
            if piece[2] in point_coordinates:
                if point_coordinates[piece[2]] != piece[3]:
                    raise ValueError('Different points cannot have the same label')
            else:
                if cartesian:
                    point_coordinates[piece[2]] = change_reference(
                        reciprocal_cell,
                        numpy.array([piece[3]]),
                        to_cartesian=False)[0]
                else:
                    point_coordinates[piece[2]] = piece[3]
        num_points = [i[4] for i in value]

    else:
        raise ValueError('Input format not recognized')

    # Expand each segment into equispaced kpoints (in crystal coordinates),
    # skipping the point shared between two consecutive segments.
    explicit_kpoints = [tuple(point_coordinates[path[0][0]])]
    labels = [(0, path[0][0])]
    for count_piece, i in enumerate(path):
        ini_label = i[0]
        end_label = i[1]
        ini_coord = point_coordinates[ini_label]
        end_coord = point_coordinates[end_label]
        path_piece = list(zip(numpy.linspace(ini_coord[0], end_coord[0],
                                             num_points[count_piece]),
                              numpy.linspace(ini_coord[1], end_coord[1],
                                             num_points[count_piece]),
                              numpy.linspace(ini_coord[2], end_coord[2],
                                             num_points[count_piece]),
                              ))
        for count, j in enumerate(path_piece):
            if all(numpy.array(explicit_kpoints[-1]) == j):
                continue  # avoid duplicates
            else:
                explicit_kpoints.append(j)
            # add labels for the first and last point
            if count == 0:
                labels.append((len(explicit_kpoints) - 1, ini_label))
            if count == len(path_piece) - 1:
                labels.append((len(explicit_kpoints) - 1, end_label))

    # I still have some duplicates in the labels: eliminate them.
    # (Previously the result of sorted() was discarded, so the deduplication
    # was a no-op and `labels` was returned with duplicates.)
    labels = sorted(set(labels), key=lambda x: x[0])

    return point_coordinates, path, bravais_info, explicit_kpoints, labels
def find_bravais_info(cell, pbc, epsilon_length=_default_epsilon_length,
epsilon_angle=_default_epsilon_angle):
"""
Finds the Bravais lattice of the cell passed in input to the Kpoint class
:note: We assume that the cell given by the cell property is the
primitive unit cell.
.. note:: in 3D, this implementation expects
that the structure is already standardized according to the Setyawan
paper. If this is not the case, the kpoints and band structure returned will be incorrect. The only case
that is dealt correctly by the library is the case when axes are swapped, where the library correctly
takes this swapping/rotation into account to assign kpoint labels and coordinates.
:param cell: 3x3 array representing the structure cell lattice vectors
:param pbc: 3-dimensional array of booleans signifying the periodic boundary
conditions along each lattice vector
passed in value as cartesian coordinates. Default: False.
:param float epsilon_length: threshold on lengths comparison, used
to get the bravais lattice info. It has to be used if the
user wants to be sure the right symmetries are recognized.
:param float epsilon_angle: threshold on angles comparison, used
to get the bravais lattice info. It has to be used if the
user wants to be sure the right symmetries are recognized.
:return: a dictionary, with keys short_name, extended_name, index
(index of the Bravais lattice), and sometimes variation (name of
the variation of the Bravais lattice) and extra (a dictionary
with extra parameters used by the get_kpoints_path method)
"""
if cell is None:
return None
analysis = analyze_cell(cell, pbc)
a1 = analysis['a1']
a2 = analysis['a2']
a3 = analysis['a3']
a = analysis['a']
b = analysis['b']
c = analysis['c']
cosa = analysis['cosalpha']
cosb = analysis['cosbeta']
cosc = analysis['cosgamma']
dimension = analysis['dimension']
pbc = list(pbc)
# values of cosines at various angles
_90 = 0.
_60 = 0.5
_30 = numpy.sqrt(3.) / 2.
_120 = -0.5
# NOTE: in what follows, I'm assuming the textbook order of alfa, beta and gamma
# TODO: Maybe additional checks to see if the "correct" primitive
# cell is used ? (there are other equivalent primitive
# unit cells to the one expected here, typically for body-, c-, and
# face-centered lattices)
def l_are_equals(a, b):
# function to compare lengths
return abs(a - b) <= epsilon_length
def a_are_equals(a, b):
# function to compare angles (actually, cosines)
return abs(a - b) <= epsilon_angle
if dimension == 3:
# =========================================#
# 3D case -> 14 possible Bravais lattices #
# =========================================#
comparison_length = [l_are_equals(a, b), l_are_equals(b, c),
l_are_equals(c, a)]
comparison_angles = [a_are_equals(cosa, cosb), a_are_equals(cosb, cosc),
a_are_equals(cosc, cosa)]
if comparison_length.count(True) == 3:
# needed for the body centered orthorhombic:
orci_a = numpy.linalg.norm(a2 + a3)
orci_b = numpy.linalg.norm(a1 + a3)
orci_c = numpy.linalg.norm(a1 + a2)
orci_the_a, orci_the_b, orci_the_c = sorted([orci_a, orci_b, orci_c])
bco1 = - (-orci_the_a ** 2 + orci_the_b ** 2 + orci_the_c ** 2) / (4. * a ** 2)
bco2 = - (orci_the_a ** 2 - orci_the_b ** 2 + orci_the_c ** 2) / (4. * a ** 2)
bco3 = - (orci_the_a ** 2 + orci_the_b ** 2 - orci_the_c ** 2) / (4. * a ** 2)
# ======================#
# simple cubic lattice #
# ======================#
if comparison_angles.count(True) == 3 and a_are_equals(cosa, _90):
bravais_info = {'short_name': 'cub',
'extended_name': 'cubic',
'index': 1,
'permutation': [0, 1, 2]
}
# =====================#
# face centered cubic #
# =====================#
elif comparison_angles.count(True) == 3 and a_are_equals(cosa, _60):
bravais_info = {'short_name': 'fcc',
'extended_name': 'face centered cubic',
'index': 2,
'permutation': [0, 1, 2]
}
# =====================#
# body centered cubic #
# =====================#
elif comparison_angles.count(True) == 3 and a_are_equals(cosa, -1. / 3.):
bravais_info = {'short_name': 'bcc',
'extended_name': 'body centered cubic',
'index': 3,
'permutation': [0, 1, 2]
}
# ==============#
# rhombohedral #
# ==============#
elif comparison_angles.count(True) == 3:
# logical order is important, this check must come after the cubic cases
bravais_info = {'short_name': 'rhl',
'extended_name': 'rhombohedral',
'index': 11,
'permutation': [0, 1, 2]
}
if cosa > 0.:
bravais_info['variation'] = 'rhl1'
eta = (1. + 4. * cosa) / (2. + 4. * cosa)
bravais_info['extra'] = {'eta': eta,
'nu': 0.75 - eta / 2.,
}
else:
bravais_info['variation'] = 'rhl2'
eta = 1. / (2. * (1. - cosa) / (1. + cosa))
bravais_info['extra'] = {'eta': eta,
'nu': 0.75 - eta / 2.,
}
# ==========================#
# body centered tetragonal #
# ==========================#
elif comparison_angles.count(True) == 1: # two angles are the same
bravais_info = {'short_name': 'bct',
'extended_name': 'body centered tetragonal',
'index': 5,
}
if comparison_angles.index(True) == 0: # alfa=beta
ref_ang = cosa
bravais_info['permutation'] = [0, 1, 2]
elif comparison_angles.index(True) == 1: # beta=gamma
ref_ang = cosb
bravais_info['permutation'] = [2, 0, 1]
else: # comparison_angles.index(True)==2: # gamma = alfa
ref_ang = cosc
bravais_info['permutation'] = [1, 2, 0]
if ref_ang >= 0.:
raise ValueError('Problems on the definition of '
'body centered tetragonal lattices')
the_c = numpy.sqrt(-4. * ref_ang * (a ** 2))
the_a = numpy.sqrt(2. * a ** 2 - (the_c ** 2) / 2.)
if the_c < the_a:
bravais_info['variation'] = 'bct1'
bravais_info['extra'] = {'eta': (1. + (the_c / the_a) ** 2) / 4.}
else:
bravais_info['variation'] = 'bct2'
bravais_info['extra'] = {'eta': (1. + (the_a / the_c) ** 2) / 4.,
'csi': ((the_a / the_c) ** 2) / 2.,
}
# ============================#
# body centered orthorhombic #
# ============================#
elif (any([a_are_equals(cosa, bco1), a_are_equals(cosb, bco1), a_are_equals(cosc, bco1)]) and
any([a_are_equals(cosa, bco2), a_are_equals(cosb, bco2), a_are_equals(cosc, bco2)]) and
any([a_are_equals(cosa, bco3), a_are_equals(cosb, bco3), a_are_equals(cosc, bco3)])
):
bravais_info = {'short_name': 'orci',
'extended_name': 'body centered orthorhombic',
'index': 8,
}
if a_are_equals(cosa, bco1) and a_are_equals(cosc, bco3):
bravais_info['permutation'] = [0, 1, 2]
if a_are_equals(cosa, bco1) and a_are_equals(cosc, bco2):
bravais_info['permutation'] = [0, 2, 1]
if a_are_equals(cosa, bco3) and a_are_equals(cosc, bco2):
bravais_info['permutation'] = [1, 2, 0]
if a_are_equals(cosa, bco2) and a_are_equals(cosc, bco3):
bravais_info['permutation'] = [1, 0, 2]
if a_are_equals(cosa, bco2) and a_are_equals(cosc, bco1):
bravais_info['permutation'] = [2, 0, 1]
if a_are_equals(cosa, bco3) and a_are_equals(cosc, bco1):
bravais_info['permutation'] = [2, 1, 0]
bravais_info['extra'] = {'csi': (1. + (orci_the_a / orci_the_c) ** 2) / 4.,
'eta': (1. + (orci_the_b / orci_the_c) ** 2) / 4.,
'dlt': (orci_the_b ** 2 - orci_the_a ** 2) / (4. * orci_the_c ** 2),
'mu': (orci_the_a ** 2 + orci_the_b ** 2) / (4. * orci_the_c ** 2),
}
# if it doesn't fall in the above, is triclinic
else:
bravais_info = {'short_name': 'tri',
'extended_name': 'triclinic',
'index': 14,
}
# the check for triclinic variations is at the end of the method
elif comparison_length.count(True) == 1:
# ============#
# tetragonal #
# ============#
if comparison_angles.count(True) == 3 and a_are_equals(cosa, _90):
bravais_info = {'short_name': 'tet',
'extended_name': 'tetragonal',
'index': 4,
}
if comparison_length[0] == True:
bravais_info['permutation'] = [0, 1, 2]
if comparison_length[1] == True:
bravais_info['permutation'] = [2, 0, 1]
if comparison_length[2] == True:
bravais_info['permutation'] = [1, 2, 0]
# ====================================#
# c-centered orthorombic + hexagonal #
# ====================================#
# alpha/=beta=gamma=pi/2
elif (comparison_angles.count(True) == 1 and
any([a_are_equals(cosa, _90), a_are_equals(cosb, _90), a_are_equals(cosc, _90)])
):
if any([a_are_equals(cosa, _120), a_are_equals(cosb, _120), a_are_equals(cosc, _120)]):
bravais_info = {'short_name': 'hex',
'extended_name': 'hexagonal',
'index': 10,
}
else:
bravais_info = {'short_name': 'orcc',
'extended_name': 'c-centered orthorhombic',
'index': 9,
}
if comparison_length[0] == True:
the_a1 = a1
the_a2 = a2
elif comparison_length[1] == True:
the_a1 = a2
the_a2 = a3
else: # comparison_length[2]==True:
the_a1 = a3
the_a2 = a1
the_a = numpy.linalg.norm(the_a1 + the_a2)
the_b = numpy.linalg.norm(the_a1 - the_a2)
bravais_info['extra'] = {'csi': (1. + (the_a / the_b) ** 2) / 4.,
}
# TODO : re-check this case, permutations look weird
if comparison_length[0] == True:
bravais_info['permutation'] = [0, 1, 2]
if comparison_length[1] == True:
bravais_info['permutation'] = [2, 0, 1]
if comparison_length[2] == True:
bravais_info['permutation'] = [1, 2, 0]
# =======================#
# c-centered monoclinic #
# =======================#
elif comparison_angles.count(True) == 1:
bravais_info = {'short_name': 'mclc',
'extended_name': 'c-centered monoclinic',
'index': 13,
}
# TODO : re-check this case, permutations look weird
if comparison_length[0] == True:
bravais_info['permutation'] = [0, 1, 2]
the_ka = cosa
the_a1 = a1
the_a2 = a2
the_c = c
if comparison_length[1] == True:
bravais_info['permutation'] = [2, 0, 1]
the_ka = cosb
the_a1 = a2
the_a2 = a3
the_c = a
if comparison_length[2] == True:
bravais_info['permutation'] = [1, 2, 0]
the_ka = cosc
the_a1 = a3
the_a2 = a1
the_c = b
the_b = numpy.linalg.norm(the_a1 + the_a2)
the_a = numpy.linalg.norm(the_a1 - the_a2)
the_cosa = 2. * numpy.linalg.norm(the_a1) / the_b * the_ka
if a_are_equals(the_ka, _90): # order matters: has to be before the check on mclc1
bravais_info['variation'] = 'mclc2'
csi = (2. - the_b * the_cosa / the_c) / (4. * (1. - the_cosa ** 2))
psi = 0.75 - the_a ** 2 / (4. * the_b * (1. - the_cosa ** 2))
bravais_info['extra'] = {'csi': csi,
'eta': 0.5 + 2. * csi * the_c * the_cosa / the_b,
'psi': psi,
'phi': psi + (0.75 - psi) * the_b * the_cosa / the_c,
}
elif the_ka < 0.:
bravais_info['variation'] = 'mclc1'
csi = (2. - the_b * the_cosa / the_c) / (4. * (1. - the_cosa ** 2))
psi = 0.75 - the_a ** 2 / (4. * the_b * (1. - the_cosa ** 2))
bravais_info['extra'] = {'csi': csi,
'eta': 0.5 + 2. * csi * the_c * the_cosa / the_b,
'psi': psi,
'phi': psi + (0.75 - psi) * the_b * the_cosa / the_c,
}
else: # if the_ka>0.:
x = the_b * the_cosa / the_c + the_b ** 2 * (1. - the_cosa ** 2) / the_a ** 2
if a_are_equals(x, 1.):
bravais_info['variation'] = 'mclc4' # order matters here too
mu = (1. + (the_b / the_a) ** 2) / 4.
dlt = the_b * the_c * the_cosa / (2. * the_a ** 2)
csi = mu - 0.25 + (1. - the_b * the_cosa / the_c) / (4. * (1. - the_cosa ** 2))
eta = 0.5 + 2. * csi * the_c * the_cosa / the_b
phi = 1. + eta - 2. * mu
psi = eta - 2. * dlt
bravais_info['extra'] = {'mu': mu,
'dlt': dlt,
'csi': csi,
'eta': eta,
'phi': phi,
'psi': psi,
}
elif x < 1.:
bravais_info['variation'] = 'mclc3'
mu = (1. + (the_b / the_a) ** 2) / 4.
dlt = the_b * the_c * the_cosa / (2. * the_a ** 2)
csi = mu - 0.25 + (1. - the_b * the_cosa / the_c) / (4. * (1. - the_cosa ** 2))
eta = 0.5 + 2. * csi * the_c * the_cosa / the_b
phi = 1. + eta - 2. * mu
psi = eta - 2. * dlt
bravais_info['extra'] = {'mu': mu,
'dlt': dlt,
'csi': csi,
'eta': eta,
'phi': phi,
'psi': psi,
}
elif x > 1.:
bravais_info['variation'] = 'mclc5'
csi = ((the_b / the_a) ** 2 + (1. - the_b * the_cosa / the_c) / (1. - the_cosa ** 2)) / 4.
eta = 0.5 + 2. * csi * the_c * the_cosa / the_b
mu = eta / 2. + the_b ** 2 / 4. / the_a ** 2 - the_b * the_c * the_cosa / 2. / the_a ** 2
nu = 2. * mu - csi
omg = (4. * nu - 1. - the_b ** 2 * (1. - the_cosa ** 2) / the_a ** 2) * the_c / (
2. * the_b * the_cosa)
dlt = csi * the_c * the_cosa / the_b + omg / 2. - 0.25
rho = 1. - csi * the_a ** 2 / the_b ** 2
bravais_info['extra'] = {'mu': mu,
'dlt': dlt,
'csi': csi,
'eta': eta,
'rho': rho,
}
# if it doesn't fall in the above, is triclinic
else:
bravais_info = {'short_name': 'tri',
'extended_name': 'triclinic',
'index': 14,
}
# the check for triclinic variations is at the end of the method
else: # if comparison_length.count(True)==0:
fco1 = c ** 2 / numpy.sqrt((a ** 2 + c ** 2) * (b ** 2 + c ** 2))
fco2 = a ** 2 / numpy.sqrt((a ** 2 + b ** 2) * (a ** 2 + c ** 2))
fco3 = b ** 2 / numpy.sqrt((a ** 2 + b ** 2) * (b ** 2 + c ** 2))
# ==============#
# orthorhombic #
# ==============#
if comparison_angles.count(True) == 3:
bravais_info = {'short_name': 'orc',
'extended_name': 'orthorhombic',
'index': 6,
}
lens = [a, b, c]
ind_a = lens.index(min(lens))
ind_c = lens.index(max(lens))
if ind_a == 0 and ind_c == 1:
bravais_info['permutation'] = [0, 2, 1]
if ind_a == 0 and ind_c == 2:
bravais_info['permutation'] = [0, 1, 2]
if ind_a == 1 and ind_c == 0:
bravais_info['permutation'] = [1, 2, 0]
if ind_a == 1 and ind_c == 2:
bravais_info['permutation'] = [1, 0, 2]
if ind_a == 2 and ind_c == 0:
bravais_info['permutation'] = [2, 1, 0]
if ind_a == 2 and ind_c == 1:
bravais_info['permutation'] = [2, 0, 1]
# ============#
# monoclinic #
# ============#
elif (comparison_angles.count(True) == 1 and
any([a_are_equals(cosa, _90), a_are_equals(cosb, _90), a_are_equals(cosc, _90)])):
bravais_info = {'short_name': 'mcl',
'extended_name': 'monoclinic',
'index': 12,
}
lens = [a, b, c]
# find the angle different from 90
# then order (if possible) a<b<c
if not a_are_equals(cosa, _90):
the_cosa = cosa
the_a = min(a, b)
the_b = max(a, b)
the_c = c
if lens.index(the_a) == 0:
bravais_info['permutation'] = [0, 1, 2]
else:
bravais_info['permutation'] = [1, 0, 2]
elif not a_are_equals(cosb, _90):
the_cosa = cosb
the_a = min(a, c)
the_b = max(a, c)
the_c = b
if lens.index(the_a) == 0:
bravais_info['permutation'] = [0, 2, 1]
else:
bravais_info['permutation'] = [1, 2, 0]
else: # if not _are_equals(cosc,_90):
the_cosa = cosc
the_a = min(b, c)
the_b = max(b, c)
the_c = a
if lens.index(the_a) == 1:
bravais_info['permutation'] = [2, 0, 1]
else:
bravais_info['permutation'] = [2, 1, 0]
eta = (1. - the_b * the_cosa / the_c) / (2. * (1. - the_cosa ** 2))
bravais_info['extra'] = {'eta': eta,
'nu': 0.5 - eta * the_c * the_cosa / the_b,
}
# ============================#
# face centered orthorhombic #
# ============================#
elif (any([a_are_equals(cosa, fco1), a_are_equals(cosb, fco1), a_are_equals(cosc, fco1)]) and
any([a_are_equals(cosa, fco2), a_are_equals(cosb, fco2), a_are_equals(cosc, fco2)]) and
any([a_are_equals(cosa, fco3), a_are_equals(cosb, fco3), a_are_equals(cosc, fco3)])
):
bravais_info = {'short_name': 'orcf',
'extended_name': 'face centered orthorhombic',
'index': 7,
}
lens = [a, b, c]
ind_a1 = lens.index(max(lens))
ind_a3 = lens.index(min(lens))
if ind_a1 == 0 and ind_a3 == 2:
bravais_info['permutation'] = [0, 1, 2]
the_a1 = a1
the_a2 = a2
the_a3 = a3
elif ind_a1 == 0 and ind_a3 == 1:
bravais_info['permutation'] = [0, 2, 1]
the_a1 = a1
the_a2 = a3
the_a3 = a2
elif ind_a1 == 1 and ind_a3 == 2:
bravais_info['permutation'] = [1, 0, 2]
the_a1 = a2
the_a2 = a1
the_a3 = a3
elif ind_a1 == 1 and ind_a3 == 0:
bravais_info['permutation'] = [2, 0, 1]
the_a1 = a3
the_a2 = a1
the_a3 = a2
elif ind_a1 == 2 and ind_a3 == 1:
bravais_info['permutation'] = [1, 2, 0]
the_a1 = a2
the_a2 = a3
the_a3 = a1
else: # ind_a1 == 2 and ind_a3 == 0:
bravais_info['permutation'] = [2, 1, 0]
the_a1 = a3
the_a2 = a2
the_a3 = a1
the_a = numpy.linalg.norm(- the_a1 + the_a2 + the_a3)
the_b = numpy.linalg.norm(+ the_a1 - the_a2 + the_a3)
the_c = numpy.linalg.norm(+ the_a1 + the_a2 - the_a3)
fco4 = 1. / the_a ** 2 - 1. / the_b ** 2 - 1. / the_c ** 2
# orcf3
if a_are_equals(fco4, 0.):
bravais_info['variation'] = 'orcf3' # order matters
bravais_info['extra'] = {'csi': (1. + (the_a / the_b) ** 2 - (the_a / the_c) ** 2) / 4.,
'eta': (1. + (the_a / the_b) ** 2 + (the_a / the_c) ** 2) / 4.,
}
# orcf1
elif fco4 > 0.:
bravais_info['variation'] = 'orcf1'
bravais_info['extra'] = {'csi': (1. + (the_a / the_b) ** 2 - (the_a / the_c) ** 2) / 4.,
'eta': (1. + (the_a / the_b) ** 2 + (the_a / the_c) ** 2) / 4.,
}
# orcf2
else:
bravais_info['variation'] = 'orcf2'
bravais_info['extra'] = {'eta': (1. + (the_a / the_b) ** 2 - (the_a / the_c) ** 2) / 4.,
'dlt': (1. + (the_b / the_a) ** 2 + (the_b / the_c) ** 2) / 4.,
'phi': (1. + (the_c / the_b) ** 2 - (the_c / the_a) ** 2) / 4.,
}
else:
bravais_info = {'short_name': 'tri',
'extended_name': 'triclinic',
'index': 14,
}
# ===========#
# triclinic #
# ===========#
# still miss the variations of triclinic
if bravais_info['short_name'] == 'tri':
lens = [a, b, c]
ind_a = lens.index(min(lens))
ind_c = lens.index(max(lens))
if ind_a == 0 and ind_c == 1:
the_a = a
the_b = c
the_c = b
the_cosa = cosa
the_cosb = cosc
the_cosc = cosb
bravais_info['permutation'] = [0, 2, 1]
if ind_a == 0 and ind_c == 2:
the_a = a
the_b = b
the_c = c
the_cosa = cosa
the_cosb = cosb
the_cosc = cosc
bravais_info['permutation'] = [0, 1, 2]
if ind_a == 1 and ind_c == 0:
the_a = b
the_b = c
the_c = a
the_cosa = cosb
the_cosb = cosc
the_cosc = cosa
bravais_info['permutation'] = [1, 0, 2]
if ind_a == 1 and ind_c == 2:
the_a = b
the_b = a
the_c = c
the_cosa = cosb
the_cosb = cosa
the_cosc = cosc
bravais_info['permutation'] = [1, 0, 2]
if ind_a == 2 and ind_c == 0:
the_a = c
the_b = b
the_c = a
the_cosa = cosc
the_cosb = cosb
the_cosc = cosa
bravais_info['permutation'] = [2, 1, 0]
if ind_a == 2 and ind_c == 1:
the_a = c
the_b = a
the_c = b
the_cosa = cosc
the_cosb = cosa
the_cosc = cosb
bravais_info['permutation'] = [2, 0, 1]
if the_cosa < 0. and the_cosb < 0.:
if a_are_equals(the_cosc, 0.):
bravais_info['variation'] = 'tri2a'
elif the_cosc < 0.:
bravais_info['variation'] = 'tri1a'
else:
raise ValueError('Structure erroneously fell into the triclinic (a) case')
elif the_cosa > 0. and the_cosb > 0.:
if a_are_equals(the_cosc, 0.):
bravais_info['variation'] = 'tri2b'
elif the_cosc > 0.:
bravais_info['variation'] = 'tri1b'
else:
raise ValueError('Structure erroneously fell into the triclinic (b) case')
else:
raise ValueError('Structure erroneously fell into the triclinic case')
elif dimension == 2:
# ========================================#
# 2D case -> 5 possible Bravais lattices #
# ========================================#
# find the two in-plane lattice vectors
out_of_plane_index = pbc.index(False) # the non-periodic dimension
in_plane_indexes = list(set(range(3)) - set([out_of_plane_index]))
# in_plane_indexes are the indexes of the two dimensions (e.g. [0,1])
# build a length-2 list with the 2D cell lattice vectors
list_vectors = ['a1', 'a2', 'a3']
vectors = [eval(list_vectors[i]) for i in in_plane_indexes]
# build a length-2 list with the norms of the 2D cell lattice vectors
lens = [numpy.linalg.norm(v) for v in vectors]
# cosine of the angle between the two primitive vectors
list_angles = ['cosa', 'cosb', 'cosc']
cosphi = eval(list_angles[out_of_plane_index])
comparison_length = l_are_equals(lens[0], lens[1])
comparison_angle_90 = a_are_equals(cosphi, _90)
# ================#
# square lattice #
# ================#
if comparison_angle_90 and comparison_length:
bravais_info = {'short_name': 'sq',
'extended_name': 'square',
'index': 1,
}
# =========================#
# (primitive) rectangular #
# =========================#
elif comparison_angle_90:
bravais_info = {'short_name': 'rec',
'extended_name': 'rectangular',
'index': 2,
}
# set the order such that first_vector < second_vector in norm
if lens[0] > lens[1]:
in_plane_indexes.reverse()
# ===========#
# hexagonal #
# ===========#
# this has to be put before the centered-rectangular case
elif (l_are_equals(lens[0], lens[1]) and a_are_equals(cosphi, _120)):
bravais_info = {'short_name': 'hex',
'extended_name': 'hexagonal',
'index': 4,
}
# ======================#
# centered rectangular #
# ======================#
elif (comparison_length and
l_are_equals(numpy.dot(vectors[0] + vectors[1],
vectors[0] - vectors[1]), 0.)):
bravais_info = {'short_name': 'recc',
'extended_name': 'centered rectangular',
'index': 3,
}
# =========#
# oblique #
# =========#
else:
bravais_info = {'short_name': 'obl',
'extended_name': 'oblique',
'index': 5,
}
# set the order such that first_vector < second_vector in norm
if lens[0] > lens[1]:
in_plane_indexes.reverse()
# the permutation is set such that p[2]=out_of_plane_index (third
# new axis is always the non-periodic out-of-plane axis)
# TODO: check that this (and the special points permutation of
# coordinates) works also when the out-of-plane axis is not aligned
# with one of the cartesian axis (I suspect that it doesn't...)
permutation = in_plane_indexes + [out_of_plane_index]
bravais_info['permutation'] = permutation
elif dimension <= 1:
# ====================================================#
# 0D & 1D cases -> only one possible Bravais lattice #
# ====================================================#
if dimension == 1:
# TODO: check that this (and the special points permutation of
# coordinates) works also when the 1D axis is not aligned
# with one of the cartesian axis (I suspect that it doesn't...)
in_line_index = pbc.index(True) # the only periodic dimension
# the permutation is set such that p[0]=in_line_index (the 2 last
# axes are always the non-periodic ones)
permutation = [in_line_index] + list(set(range(3)) - set([in_line_index]))
else:
permutation = [0, 1, 2]
bravais_info = {
'short_name': '{}D'.format(dimension),
'extended_name': '{}D'.format(dimension),
'index': 1,
'permutation': permutation,
}
return bravais_info
def get_kpoints_path(cell, pbc=None, cartesian=False,
                     epsilon_length=_default_epsilon_length,
                     epsilon_angle=_default_epsilon_angle):
    """
    Get the special point and path of a given structure.

    .. note:: in 3D, this implementation expects
        that the structure is already standardized according to the Setyawan
        paper. If this is not the case, the kpoints and band structure returned
        will be incorrect. The only case that is dealt correctly by the library
        is the case when axes are swapped, where the library correctly
        takes this swapping/rotation into account to assign kpoint labels
        and coordinates.

    - In 2D, coordinates are based on the paper:
      R. Ramirez and M. C. Bohm, Int. J. Quant. Chem., XXX, pp. 391-411 (1986)
    - In 3D, coordinates are based on the paper:
      W. Setyawan, S. Curtarolo, Comp. Mat. Sci. 49, 299 (2010)

    :param cell: 3x3 array representing the structure cell lattice vectors
    :param pbc: 3-dimensional array of booleans signifying the periodic boundary
        conditions along each lattice vector
    :param cartesian: If true, returns points in cartesian coordinates.
        Crystal coordinates otherwise. Default=False
    :param epsilon_length: threshold on lengths comparison, used
        to get the bravais lattice info
    :param epsilon_angle: threshold on angles comparison, used
        to get the bravais lattice info
    :return special_points,path: special_points: a dictionary of
        point_name:point_coords key,values.
        path: the suggested path which goes through all high symmetry
        lines. A list of lists for all path segments.
        e.g. ``[('G','X'),('X','M'),...]``
        It's not necessarily a continuous line.
    :note: We assume that the cell given by the cell property is the
        primitive unit cell
    """
    # recognize which bravais lattice we are dealing with
    bravais_info = find_bravais_info(
        cell=cell, pbc=pbc,
        epsilon_length=epsilon_length,
        epsilon_angle=epsilon_angle
    )
    analysis = analyze_cell(cell, pbc)
    dimension = analysis['dimension']
    reciprocal_cell = analysis['reciprocal_cell']
    # pick the information about the special k-points.
    # it depends on the dimensionality and the Bravais lattice number.
    # Each branch below sets `special_points` (label -> fractional coords in the
    # primitive reciprocal cell, in the standardized axis order) and `path`
    # (a list of (start_label, end_label) segments).
    if dimension == 3:
        # 3D case: 14 Bravais lattices
        # simple cubic
        if bravais_info['index'] == 1:
            special_points = {'G': [0., 0., 0.],
                              'M': [0.5, 0.5, 0.],
                              'R': [0.5, 0.5, 0.5],
                              'X': [0., 0.5, 0.],
                              }
            path = [('G', 'X'),
                    ('X', 'M'),
                    ('M', 'G'),
                    ('G', 'R'),
                    ('R', 'X'),
                    ('M', 'R'),
                    ]
        # face centered cubic
        elif bravais_info['index'] == 2:
            special_points = {'G': [0., 0., 0.],
                              'K': [3. / 8., 3. / 8., 0.75],
                              'L': [0.5, 0.5, 0.5],
                              'U': [5. / 8., 0.25, 5. / 8.],
                              'W': [0.5, 0.25, 0.75],
                              'X': [0.5, 0., 0.5],
                              }
            path = [('G', 'X'),
                    ('X', 'W'),
                    ('W', 'K'),
                    ('K', 'G'),
                    ('G', 'L'),
                    ('L', 'U'),
                    ('U', 'W'),
                    ('W', 'L'),
                    ('L', 'K'),
                    ('U', 'X'),
                    ]
        # body centered cubic
        elif bravais_info['index'] == 3:
            special_points = {'G': [0., 0., 0.],
                              'H': [0.5, -0.5, 0.5],
                              'P': [0.25, 0.25, 0.25],
                              'N': [0., 0., 0.5],
                              }
            path = [('G', 'H'),
                    ('H', 'N'),
                    ('N', 'G'),
                    ('G', 'P'),
                    ('P', 'H'),
                    ('P', 'N'),
                    ]
        # Tetragonal
        elif bravais_info['index'] == 4:
            special_points = {'G': [0., 0., 0.],
                              'A': [0.5, 0.5, 0.5],
                              'M': [0.5, 0.5, 0.],
                              'R': [0., 0.5, 0.5],
                              'X': [0., 0.5, 0.],
                              'Z': [0., 0., 0.5],
                              }
            path = [('G', 'X'),
                    ('X', 'M'),
                    ('M', 'G'),
                    ('G', 'Z'),
                    ('Z', 'R'),
                    ('R', 'A'),
                    ('A', 'Z'),
                    ('X', 'R'),
                    ('M', 'A'),
                    ]
        # body centered tetragonal
        # Two variations (bct1/bct2); the extra parameters (eta, csi) computed
        # by find_bravais_info enter some point coordinates.
        elif bravais_info['index'] == 5:
            if bravais_info['variation'] == 'bct1':
                # Body centered tetragonal bct1
                eta = bravais_info['extra']['eta']
                special_points = {'G': [0., 0., 0.],
                                  'M': [-0.5, 0.5, 0.5],
                                  'N': [0., 0.5, 0.],
                                  'P': [0.25, 0.25, 0.25],
                                  'X': [0., 0., 0.5],
                                  'Z': [eta, eta, -eta],
                                  'Z1': [-eta, 1. - eta, eta],
                                  }
                path = [('G', 'X'),
                        ('X', 'M'),
                        ('M', 'G'),
                        ('G', 'Z'),
                        ('Z', 'P'),
                        ('P', 'N'),
                        ('N', 'Z1'),
                        ('Z1', 'M'),
                        ('X', 'P'),
                        ]
            else:  # bct2
                # Body centered tetragonal bct2
                eta = bravais_info['extra']['eta']
                csi = bravais_info['extra']['csi']
                special_points = {
                    'G': [0., 0., 0.],
                    'N': [0., 0.5, 0.],
                    'P': [0.25, 0.25, 0.25],
                    'S': [-eta, eta, eta],
                    'S1': [eta, 1 - eta, -eta],
                    'X': [0., 0., 0.5],
                    'Y': [-csi, csi, 0.5],
                    'Y1': [0.5, 0.5, -csi],
                    'Z': [0.5, 0.5, -0.5],
                }
                path = [('G', 'X'),
                        ('X', 'Y'),
                        ('Y', 'S'),
                        ('S', 'G'),
                        ('G', 'Z'),
                        ('Z', 'S1'),
                        ('S1', 'N'),
                        ('N', 'P'),
                        ('P', 'Y1'),
                        ('Y1', 'Z'),
                        ('X', 'P'),
                        ]
        # orthorhombic
        elif bravais_info['index'] == 6:
            special_points = {'G': [0., 0., 0.],
                              'R': [0.5, 0.5, 0.5],
                              'S': [0.5, 0.5, 0.],
                              'T': [0., 0.5, 0.5],
                              'U': [0.5, 0., 0.5],
                              'X': [0.5, 0., 0.],
                              'Y': [0., 0.5, 0.],
                              'Z': [0., 0., 0.5],
                              }
            path = [('G', 'X'),
                    ('X', 'S'),
                    ('S', 'Y'),
                    ('Y', 'G'),
                    ('G', 'Z'),
                    ('Z', 'U'),
                    ('U', 'R'),
                    ('R', 'T'),
                    ('T', 'Z'),
                    ('Y', 'T'),
                    ('U', 'X'),
                    ('S', 'R'),
                    ]
        # face centered orthorhombic
        elif bravais_info['index'] == 7:
            if bravais_info['variation'] == 'orcf1':
                csi = bravais_info['extra']['csi']
                eta = bravais_info['extra']['eta']
                special_points = {'G': [0., 0., 0.],
                                  'A': [0.5, 0.5 + csi, csi],
                                  'A1': [0.5, 0.5 - csi, 1. - csi],
                                  'L': [0.5, 0.5, 0.5],
                                  'T': [1., 0.5, 0.5],
                                  'X': [0., eta, eta],
                                  'X1': [1., 1. - eta, 1. - eta],
                                  'Y': [0.5, 0., 0.5],
                                  'Z': [0.5, 0.5, 0.],
                                  }
                path = [('G', 'Y'),
                        ('Y', 'T'),
                        ('T', 'Z'),
                        ('Z', 'G'),
                        ('G', 'X'),
                        ('X', 'A1'),
                        ('A1', 'Y'),
                        ('T', 'X1'),
                        ('X', 'A'),
                        ('A', 'Z'),
                        ('L', 'G'),
                        ]
            elif bravais_info['variation'] == 'orcf2':
                eta = bravais_info['extra']['eta']
                dlt = bravais_info['extra']['dlt']
                phi = bravais_info['extra']['phi']
                special_points = {'G': [0., 0., 0.],
                                  'C': [0.5, 0.5 - eta, 1. - eta],
                                  'C1': [0.5, 0.5 + eta, eta],
                                  'D': [0.5 - dlt, 0.5, 1. - dlt],
                                  'D1': [0.5 + dlt, 0.5, dlt],
                                  'L': [0.5, 0.5, 0.5],
                                  'H': [1. - phi, 0.5 - phi, 0.5],
                                  'H1': [phi, 0.5 + phi, 0.5],
                                  'X': [0., 0.5, 0.5],
                                  'Y': [0.5, 0., 0.5],
                                  'Z': [0.5, 0.5, 0.],
                                  }
                path = [('G', 'Y'),
                        ('Y', 'C'),
                        ('C', 'D'),
                        ('D', 'X'),
                        ('X', 'G'),
                        ('G', 'Z'),
                        ('Z', 'D1'),
                        ('D1', 'H'),
                        ('H', 'C'),
                        ('C1', 'Z'),
                        ('X', 'H1'),
                        ('H', 'Y'),
                        ('L', 'G'),
                        ]
            else:
                # orcf3: same special points as orcf1, but the path omits the
                # T-X1 segment (X1 is absent from the orcf3 path).
                csi = bravais_info['extra']['csi']
                eta = bravais_info['extra']['eta']
                special_points = {'G': [0., 0., 0.],
                                  'A': [0.5, 0.5 + csi, csi],
                                  'A1': [0.5, 0.5 - csi, 1. - csi],
                                  'L': [0.5, 0.5, 0.5],
                                  'T': [1., 0.5, 0.5],
                                  'X': [0., eta, eta],
                                  'X1': [1., 1. - eta, 1. - eta],
                                  'Y': [0.5, 0., 0.5],
                                  'Z': [0.5, 0.5, 0.],
                                  }
                path = [('G', 'Y'),
                        ('Y', 'T'),
                        ('T', 'Z'),
                        ('Z', 'G'),
                        ('G', 'X'),
                        ('X', 'A1'),
                        ('A1', 'Y'),
                        ('X', 'A'),
                        ('A', 'Z'),
                        ('L', 'G'),
                        ]
        # Body centered orthorhombic
        elif bravais_info['index'] == 8:
            csi = bravais_info['extra']['csi']
            dlt = bravais_info['extra']['dlt']
            eta = bravais_info['extra']['eta']
            mu = bravais_info['extra']['mu']
            special_points = {'G': [0., 0., 0.],
                              'L': [-mu, mu, 0.5 - dlt],
                              'L1': [mu, -mu, 0.5 + dlt],
                              'L2': [0.5 - dlt, 0.5 + dlt, -mu],
                              'R': [0., 0.5, 0.],
                              'S': [0.5, 0., 0.],
                              'T': [0., 0., 0.5],
                              'W': [0.25, 0.25, 0.25],
                              'X': [-csi, csi, csi],
                              'X1': [csi, 1. - csi, -csi],
                              'Y': [eta, -eta, eta],
                              'Y1': [1. - eta, eta, -eta],
                              'Z': [0.5, 0.5, -0.5],
                              }
            path = [('G', 'X'),
                    ('X', 'L'),
                    ('L', 'T'),
                    ('T', 'W'),
                    ('W', 'R'),
                    ('R', 'X1'),
                    ('X1', 'Z'),
                    ('Z', 'G'),
                    ('G', 'Y'),
                    ('Y', 'S'),
                    ('S', 'W'),
                    ('L1', 'Y'),
                    ('Y1', 'Z'),
                    ]
        # C-centered orthorhombic
        elif bravais_info['index'] == 9:
            csi = bravais_info['extra']['csi']
            special_points = {'G': [0., 0., 0.],
                              'A': [csi, csi, 0.5],
                              'A1': [-csi, 1. - csi, 0.5],
                              'R': [0., 0.5, 0.5],
                              'S': [0., 0.5, 0.],
                              'T': [-0.5, 0.5, 0.5],
                              'X': [csi, csi, 0.],
                              'X1': [-csi, 1. - csi, 0.],
                              'Y': [-0.5, 0.5, 0.],
                              'Z': [0., 0., 0.5],
                              }
            path = [('G', 'X'),
                    ('X', 'S'),
                    ('S', 'R'),
                    ('R', 'A'),
                    ('A', 'Z'),
                    ('Z', 'G'),
                    ('G', 'Y'),
                    ('Y', 'X1'),
                    ('X1', 'A1'),
                    ('A1', 'T'),
                    ('T', 'Y'),
                    ('Z', 'T'),
                    ]
        # Hexagonal
        elif bravais_info['index'] == 10:
            special_points = {'G': [0., 0., 0.],
                              'A': [0., 0., 0.5],
                              'H': [1. / 3., 1. / 3., 0.5],
                              'K': [1. / 3., 1. / 3., 0.],
                              'L': [0.5, 0., 0.5],
                              'M': [0.5, 0., 0.],
                              }
            path = [('G', 'M'),
                    ('M', 'K'),
                    ('K', 'G'),
                    ('G', 'A'),
                    ('A', 'L'),
                    ('L', 'H'),
                    ('H', 'A'),
                    ('L', 'M'),
                    ('K', 'H'),
                    ]
        # rhombohedral
        elif bravais_info['index'] == 11:
            if bravais_info['variation'] == 'rhl1':
                eta = bravais_info['extra']['eta']
                nu = bravais_info['extra']['nu']
                special_points = {'G': [0., 0., 0.],
                                  'B': [eta, 0.5, 1. - eta],
                                  'B1': [0.5, 1. - eta, eta - 1.],
                                  'F': [0.5, 0.5, 0.],
                                  'L': [0.5, 0., 0.],
                                  'L1': [0., 0., -0.5],
                                  'P': [eta, nu, nu],
                                  'P1': [1. - nu, 1. - nu, 1. - eta],
                                  'P2': [nu, nu, eta - 1.],
                                  'Q': [1. - nu, nu, 0.],
                                  'X': [nu, 0., -nu],
                                  'Z': [0.5, 0.5, 0.5],
                                  }
                path = [('G', 'L'),
                        ('L', 'B1'),
                        ('B', 'Z'),
                        ('Z', 'G'),
                        ('G', 'X'),
                        ('Q', 'F'),
                        ('F', 'P1'),
                        ('P1', 'Z'),
                        ('L', 'P'),
                        ]
            else:  # Rhombohedral rhl2
                eta = bravais_info['extra']['eta']
                nu = bravais_info['extra']['nu']
                special_points = {'G': [0., 0., 0.],
                                  'F': [0.5, -0.5, 0.],
                                  'L': [0.5, 0., 0.],
                                  'P': [1. - nu, -nu, 1. - nu],
                                  'P1': [nu, nu - 1., nu - 1.],
                                  'Q': [eta, eta, eta],
                                  'Q1': [1. - eta, -eta, -eta],
                                  'Z': [0.5, -0.5, 0.5],
                                  }
                path = [('G', 'P'),
                        ('P', 'Z'),
                        ('Z', 'Q'),
                        ('Q', 'G'),
                        ('G', 'F'),
                        ('F', 'P1'),
                        ('P1', 'Q1'),
                        ('Q1', 'L'),
                        ('L', 'Z'),
                        ]
        # monoclinic
        elif bravais_info['index'] == 12:
            eta = bravais_info['extra']['eta']
            nu = bravais_info['extra']['nu']
            special_points = {'G': [0., 0., 0.],
                              'A': [0.5, 0.5, 0.],
                              'C': [0., 0.5, 0.5],
                              'D': [0.5, 0., 0.5],
                              'D1': [0.5, 0., -0.5],
                              'E': [0.5, 0.5, 0.5],
                              'H': [0., eta, 1. - nu],
                              'H1': [0., 1. - eta, nu],
                              'H2': [0., eta, -nu],
                              'M': [0.5, eta, 1. - nu],
                              'M1': [0.5, 1. - eta, nu],
                              'M2': [0.5, eta, -nu],
                              'X': [0., 0.5, 0.],
                              'Y': [0., 0., 0.5],
                              'Y1': [0., 0., -0.5],
                              'Z': [0.5, 0., 0.],
                              }
            path = [('G', 'Y'),
                    ('Y', 'H'),
                    ('H', 'C'),
                    ('C', 'E'),
                    ('E', 'M1'),
                    ('M1', 'A'),
                    ('A', 'X'),
                    ('X', 'H1'),
                    ('M', 'D'),
                    ('D', 'Z'),
                    ('Y', 'D'),
                    ]
        # C-centered monoclinic: five variations (mclc1..mclc5), each with its
        # own extra parameters and path.
        elif bravais_info['index'] == 13:
            if bravais_info['variation'] == 'mclc1':
                csi = bravais_info['extra']['csi']
                eta = bravais_info['extra']['eta']
                psi = bravais_info['extra']['psi']
                phi = bravais_info['extra']['phi']
                special_points = {'G': [0., 0., 0.],
                                  'N': [0.5, 0., 0.],
                                  'N1': [0., -0.5, 0.],
                                  'F': [1. - csi, 1. - csi, 1. - eta],
                                  'F1': [csi, csi, eta],
                                  'F2': [csi, -csi, 1. - eta],
                                  'F3': [1. - csi, -csi, 1. - eta],
                                  'I': [phi, 1. - phi, 0.5],
                                  'I1': [1. - phi, phi - 1., 0.5],
                                  'L': [0.5, 0.5, 0.5],
                                  'M': [0.5, 0., 0.5],
                                  'X': [1. - psi, psi - 1., 0.],
                                  'X1': [psi, 1. - psi, 0.],
                                  'X2': [psi - 1., -psi, 0.],
                                  'Y': [0.5, 0.5, 0.],
                                  'Y1': [-0.5, -0.5, 0.],
                                  'Z': [0., 0., 0.5],
                                  }
                path = [('G', 'Y'),
                        ('Y', 'F'),
                        ('F', 'L'),
                        ('L', 'I'),
                        ('I1', 'Z'),
                        ('Z', 'F1'),
                        ('Y', 'X1'),
                        ('X', 'G'),
                        ('G', 'N'),
                        ('M', 'G'),
                        ]
            elif bravais_info['variation'] == 'mclc2':
                # Same special-point table as mclc1; only the path differs.
                csi = bravais_info['extra']['csi']
                eta = bravais_info['extra']['eta']
                psi = bravais_info['extra']['psi']
                phi = bravais_info['extra']['phi']
                special_points = {'G': [0., 0., 0.],
                                  'N': [0.5, 0., 0.],
                                  'N1': [0., -0.5, 0.],
                                  'F': [1. - csi, 1. - csi, 1. - eta],
                                  'F1': [csi, csi, eta],
                                  'F2': [csi, -csi, 1. - eta],
                                  'F3': [1. - csi, -csi, 1. - eta],
                                  'I': [phi, 1. - phi, 0.5],
                                  'I1': [1. - phi, phi - 1., 0.5],
                                  'L': [0.5, 0.5, 0.5],
                                  'M': [0.5, 0., 0.5],
                                  'X': [1. - psi, psi - 1., 0.],
                                  'X1': [psi, 1. - psi, 0.],
                                  'X2': [psi - 1., -psi, 0.],
                                  'Y': [0.5, 0.5, 0.],
                                  'Y1': [-0.5, -0.5, 0.],
                                  'Z': [0., 0., 0.5],
                                  }
                path = [('G', 'Y'),
                        ('Y', 'F'),
                        ('F', 'L'),
                        ('L', 'I'),
                        ('I1', 'Z'),
                        ('Z', 'F1'),
                        ('N', 'G'),
                        ('G', 'M'),
                        ]
            elif bravais_info['variation'] == 'mclc3':
                mu = bravais_info['extra']['mu']
                dlt = bravais_info['extra']['dlt']
                csi = bravais_info['extra']['csi']
                eta = bravais_info['extra']['eta']
                phi = bravais_info['extra']['phi']
                psi = bravais_info['extra']['psi']
                special_points = {
                    'G': [0., 0., 0.],
                    'F': [1. - phi, 1 - phi, 1. - psi],
                    'F1': [phi, phi - 1., psi],
                    'F2': [1. - phi, -phi, 1. - psi],
                    'H': [csi, csi, eta],
                    'H1': [1. - csi, -csi, 1. - eta],
                    'H2': [-csi, -csi, 1. - eta],
                    'I': [0.5, -0.5, 0.5],
                    'M': [0.5, 0., 0.5],
                    'N': [0.5, 0., 0.],
                    'N1': [0., -0.5, 0.],
                    'X': [0.5, -0.5, 0.],
                    'Y': [mu, mu, dlt],
                    'Y1': [1. - mu, -mu, -dlt],
                    'Y2': [-mu, -mu, -dlt],
                    'Y3': [mu, mu - 1., dlt],
                    'Z': [0., 0., 0.5],
                }
                path = [('G', 'Y'),
                        ('Y', 'F'),
                        ('F', 'H'),
                        ('H', 'Z'),
                        ('Z', 'I'),
                        ('I', 'F1'),
                        ('H1', 'Y1'),
                        ('Y1', 'X'),
                        ('X', 'F'),
                        ('G', 'N'),
                        ('M', 'G'),
                        ]
            elif bravais_info['variation'] == 'mclc4':
                # Same special-point table as mclc3; only the path differs.
                mu = bravais_info['extra']['mu']
                dlt = bravais_info['extra']['dlt']
                csi = bravais_info['extra']['csi']
                eta = bravais_info['extra']['eta']
                phi = bravais_info['extra']['phi']
                psi = bravais_info['extra']['psi']
                special_points = {'G': [0., 0., 0.],
                                  'F': [1. - phi, 1 - phi, 1. - psi],
                                  'F1': [phi, phi - 1., psi],
                                  'F2': [1. - phi, -phi, 1. - psi],
                                  'H': [csi, csi, eta],
                                  'H1': [1. - csi, -csi, 1. - eta],
                                  'H2': [-csi, -csi, 1. - eta],
                                  'I': [0.5, -0.5, 0.5],
                                  'M': [0.5, 0., 0.5],
                                  'N': [0.5, 0., 0.],
                                  'N1': [0., -0.5, 0.],
                                  'X': [0.5, -0.5, 0.],
                                  'Y': [mu, mu, dlt],
                                  'Y1': [1. - mu, -mu, -dlt],
                                  'Y2': [-mu, -mu, -dlt],
                                  'Y3': [mu, mu - 1., dlt],
                                  'Z': [0., 0., 0.5],
                                  }
                path = [('G', 'Y'),
                        ('Y', 'F'),
                        ('F', 'H'),
                        ('H', 'Z'),
                        ('Z', 'I'),
                        ('H1', 'Y1'),
                        ('Y1', 'X'),
                        ('X', 'G'),
                        ('G', 'N'),
                        ('M', 'G'),
                        ]
            else:
                # mclc5: uses an extra set of parameters (omg, nu, rho).
                csi = bravais_info['extra']['csi']
                mu = bravais_info['extra']['mu']
                omg = bravais_info['extra']['omg']
                eta = bravais_info['extra']['eta']
                nu = bravais_info['extra']['nu']
                dlt = bravais_info['extra']['dlt']
                rho = bravais_info['extra']['rho']
                special_points = {
                    'G': [0., 0., 0.],
                    'F': [nu, nu, omg],
                    'F1': [1. - nu, 1. - nu, 1. - omg],
                    'F2': [nu, nu - 1., omg],
                    'H': [csi, csi, eta],
                    'H1': [1. - csi, -csi, 1. - eta],
                    'H2': [-csi, -csi, 1. - eta],
                    'I': [rho, 1. - rho, 0.5],
                    'I1': [1. - rho, rho - 1., 0.5],
                    'L': [0.5, 0.5, 0.5],
                    'M': [0.5, 0., 0.5],
                    'N': [0.5, 0., 0.],
                    'N1': [0., -0.5, 0.],
                    'X': [0.5, -0.5, 0.],
                    'Y': [mu, mu, dlt],
                    'Y1': [1. - mu, -mu, -dlt],
                    'Y2': [-mu, -mu, -dlt],
                    'Y3': [mu, mu - 1., dlt],
                    'Z': [0., 0., 0.5],
                }
                path = [('G', 'Y'),
                        ('Y', 'F'),
                        ('F', 'L'),
                        ('L', 'I'),
                        ('I1', 'Z'),
                        ('Z', 'H'),
                        ('H', 'F1'),
                        ('H1', 'Y1'),
                        ('Y1', 'X'),
                        ('X', 'G'),
                        ('G', 'N'),
                        ('M', 'G'),
                        ]
        # triclinic
        elif bravais_info['index'] == 14:
            if bravais_info['variation'] == 'tri1a' or bravais_info['variation'] == 'tri2a':
                special_points = {'G': [0.0, 0.0, 0.0],
                                  'L': [0.5, 0.5, 0.0],
                                  'M': [0.0, 0.5, 0.5],
                                  'N': [0.5, 0.0, 0.5],
                                  'R': [0.5, 0.5, 0.5],
                                  'X': [0.5, 0.0, 0.0],
                                  'Y': [0.0, 0.5, 0.0],
                                  'Z': [0.0, 0.0, 0.5],
                                  }
                path = [('X', 'G'),
                        ('G', 'Y'),
                        ('L', 'G'),
                        ('G', 'Z'),
                        ('N', 'G'),
                        ('G', 'M'),
                        ('R', 'G'),
                        ]
            else:
                # tri1b / tri2b variations
                special_points = {'G': [0.0, 0.0, 0.0],
                                  'L': [0.5, -0.5, 0.0],
                                  'M': [0.0, 0.0, 0.5],
                                  'N': [-0.5, -0.5, 0.5],
                                  'R': [0.0, -0.5, 0.5],
                                  'X': [0.0, -0.5, 0.0],
                                  'Y': [0.5, 0.0, 0.0],
                                  'Z': [-0.5, 0.0, 0.5],
                                  }
                path = [('X', 'G'),
                        ('G', 'Y'),
                        ('L', 'G'),
                        ('G', 'Z'),
                        ('N', 'G'),
                        ('G', 'M'),
                        ('R', 'G'),
                        ]
    elif dimension == 2:
        # 2D case: 5 Bravais lattices
        if bravais_info['index'] == 1:
            # square
            special_points = {'G': [0., 0., 0.],
                              'M': [0.5, 0.5, 0.],
                              'X': [0.5, 0., 0.],
                              }
            path = [('G', 'X'),
                    ('X', 'M'),
                    ('M', 'G'),
                    ]
        elif bravais_info['index'] == 2:
            # (primitive) rectangular
            special_points = {'G': [0., 0., 0.],
                              'X': [0.5, 0., 0.],
                              'Y': [0., 0.5, 0.],
                              'S': [0.5, 0.5, 0.],
                              }
            path = [('G', 'X'),
                    ('X', 'S'),
                    ('S', 'Y'),
                    ('Y', 'G'),
                    ]
        elif bravais_info['index'] == 3:
            # centered rectangular (rhombic)
            # TODO: this looks quite different from the in-plane part of the
            # 3D C-centered orthorhombic lattice, which is strange...
            # NOTE: special points below are in (b1, b2) fractional
            # coordinates (primitive reciprocal cell) as for the rest.
            # Ramirez & Bohn gave them initially in (s1=b1+b2, s2=-b1+b2)
            # coordinates, i.e. using the conventional reciprocal cell.
            special_points = {'G': [0., 0., 0.],
                              'X': [0.5, 0.5, 0.],
                              'Y1': [0.25, 0.75, 0.],
                              'Y': [-0.25, 0.25, 0.],  # typo in p. 404 of Ramirez & Bohm (should be Y=(0,1/4))
                              'C': [0., 0.5, 0.],
                              }
            path = [('Y1', 'X'),
                    ('X', 'G'),
                    ('G', 'Y'),
                    ('Y', 'C'),
                    ]
        elif bravais_info['index'] == 4:
            # hexagonal
            special_points = {'G': [0., 0., 0.],
                              'M': [0.5, 0., 0.],
                              'K': [1. / 3., 1. / 3., 0.],
                              }
            path = [('G', 'M'),
                    ('M', 'K'),
                    ('K', 'G'),
                    ]
        elif bravais_info['index'] == 5:
            # oblique
            # NOTE: only end-points are high-symmetry points (not the path
            # in-between)
            special_points = {'G': [0., 0., 0.],
                              'X': [0.5, 0., 0.],
                              'Y': [0., 0.5, 0.],
                              'A': [0.5, 0.5, 0.],
                              }
            path = [('X', 'G'),
                    ('G', 'Y'),
                    ('A', 'G'),
                    ]
    elif dimension == 1:
        # 1D case: 1 Bravais lattice
        special_points = {'G': [0., 0., 0.],
                          'X': [0.5, 0., 0.],
                          }
        path = [('G', 'X'),
                ]
    elif dimension == 0:
        # 0D case: 1 Bravais lattice, only Gamma point, no path
        special_points = {'G': [0., 0., 0.],
                          }
        path = [('G', 'G'),
                ]
    # Map the standardized coordinates back onto the *input* axis order using
    # the permutation detected by find_bravais_info.
    permutation = bravais_info['permutation']

    def permute(x, permutation):
        # return new_x such that new_x[i]=x[permutation[i]]
        return [x[int(p)] for p in permutation]

    def invpermute(permutation):
        # return the inverse of permutation (currently unused, kept for reference)
        return [permutation.index(i) for i in range(3)]

    the_special_points = {}
    for k in special_points.keys():
        # NOTE: this originally returned the inverse of the permutation, but was later changed to permutation
        the_special_points[k] = permute(special_points[k], permutation)
    # output crystal or cartesian
    if cartesian:
        # Convert fractional (crystal) coordinates to absolute cartesian
        # coordinates using the reciprocal cell.
        the_abs_special_points = {}
        for k in the_special_points.keys():
            the_abs_special_points[k] = change_reference(
                reciprocal_cell, numpy.array(the_special_points[k]), to_cartesian=True
            )
        return the_abs_special_points, path, bravais_info
    else:
        return the_special_points, path, bravais_info
|
nilq/baby-python
|
python
|
import signal
import argparse
import logging as log
import os
from pathlib import Path
import errno
from alive_progress import alive_bar
from backupdef import BackupDef
from entries import FolderEntry
from diskspacereserver import DiskSpaceReserver
from util import sanitizeFilename
def backup(source: str, destination: str):
    """Perform an incremental backup of ``source`` onto ``destination``.

    Indexes the source tree, computes the delta against the previous backup
    definition (``<name>.cbdef`` in the destination), then copies files until
    done or the destination fills up. On a full destination ("cartridge") the
    user is prompted for the next destination path; on Ctrl-C progress is
    saved and the process exits.

    :param source: path of the directory to back up
    :param destination: path of the (first) destination directory
    """
    # Create current backup definition from source folder
    print("Indexing current folder state...")
    with alive_bar(monitor="{count} files", receipt=False) as bar:
        folder = FolderEntry.fromFolder(source, bar)
    folder.name = sanitizeFilename(folder.name)
    new_backupdef = BackupDef(folder)
    # Initialize old backup definition (empty one if none exists yet)
    backupdef_path = os.path.join(destination, f"{folder.name}.cbdef")
    if Path(backupdef_path).is_file():
        print("Loading old backup definition...")
        current_backupdef = BackupDef.loadFromFile(backupdef_path)
    else:
        current_backupdef = BackupDef(FolderEntry(folder.name))
    # Initialize delta backup definition (what still needs copying/deleting)
    print("Creating delta backup definition...")
    delta_backupdef = BackupDef.delta(new_backupdef, current_backupdef)
    # Reserve disk space for the eventual backupdef file so it can always be
    # written even when the cartridge is otherwise full.
    reserver_path = os.path.join(destination, f"{folder.name}.reserved")
    reserver = DiskSpaceReserver(reserver_path, new_backupdef.fileSize * 3)
    # Copy over files until the disk is filled up
    print("Copying files...")
    with alive_bar(delta_backupdef.folder.size,
                   monitor="{count:,} / {total:,} bytes [{percent:.2%}]",
                   stats="({rate:,.0f}b/s, eta: {eta}s)") as bar:
        while delta_backupdef.folder.contents or delta_backupdef.folder.deleted:
            try:
                # Before starting to copy over files, reserve space for the eventual backupdef
                reserver.reserve()
                # Copy the files
                delta_backupdef.processDelta(current_backupdef, source, destination, bar)
            except KeyboardInterrupt:
                # Script was ended by ctrl-c, save backupdef and exit
                reserver.release()
                current_backupdef.saveToFile(backupdef_path)
                print("The copying was interrupted, the progress has been saved.")
                raise SystemExit(0)
            except Exception as e:
                # BUG FIX: only OSError carries an ``errno`` attribute; the
                # original accessed ``e.errno`` unconditionally, which raised
                # AttributeError inside this handler for any other exception.
                if getattr(e, "errno", None) == errno.ENOSPC:
                    # Disk full, save backupdef of files copied up to this point and ask for new destination
                    with bar.pause():
                        reserver.release()
                        current_backupdef.saveToFile(backupdef_path)
                        dest_input = input(f"\aCartridge full, insert next one and enter new path ({destination}): ")
                        if dest_input != "":
                            destination = dest_input
                        backupdef_path = os.path.join(destination, f"{folder.name}.cbdef")
                        reserver.path = os.path.join(destination, f"{folder.name}.reserved")
                else:
                    # Copying error, save backupdef, print exception message, continue copying next file
                    reserver.release()
                    current_backupdef.saveToFile(backupdef_path)
                    log.warning("The copying was interrupted by an error. "
                                "The progress has been saved, the details are below:")
                    log.warning(e)
    # Save backupdef of (presumably all) files copied up to this point
    reserver.release()
    current_backupdef.saveToFile(backupdef_path)
# Script entry point: parse CLI arguments and launch the backup.
if __name__ == '__main__':
    # Restore the default SIGINT handler so Ctrl-C raises KeyboardInterrupt.
    signal.signal(signal.SIGINT, signal.default_int_handler)
    # BUG FIX: the original implicit string concatenation produced
    # "backup tomultiple" and "drives(cartridges)" (missing spaces).
    parser = argparse.ArgumentParser(description="Perform an incremental backup to "
                                                 "multiple, smaller destination drives (cartridges).")
    parser.add_argument("source", help="The source directory")
    parser.add_argument("destination", help="The destination directory")
    parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
    args = parser.parse_args()
    if args.verbose:
        log.basicConfig(format="%(levelname)s: %(message)s", level=log.DEBUG)
    else:
        log.basicConfig(format="%(levelname)s: %(message)s")
    log.info(f"Running with source {args.source} and destination {args.destination}")
    backup(args.source, args.destination)
|
nilq/baby-python
|
python
|
import redis
def handler(message):
    """Print an incoming pub/sub message's payload (decoded as UTF-8).

    :param message: redis pub/sub message dict; ``message['data']`` is bytes.
    """
    # BUG FIX: typo "recieved" -> "received" in the user-facing output.
    print("New message received:", message['data'].decode('utf-8'))
# Connect to the Redis broker hosting the chat channel.
# NOTE(review): the broker address is hard-coded - confirm it is reachable
# from the machines this script runs on.
r = redis.Redis('10.14.156.254')
p = r.pubsub()
# Register handler() as the callback for messages on the 'chat' channel.
p.subscribe(**{'chat': handler})
thread = p.run_in_thread(sleep_time=0.5)  # spawn a background thread that polls for messages
print("Press Ctrl+C to stop")
# Read lines from stdin and publish each one to the channel until Ctrl-C.
while True:
    try:
        new_message = input()
        r.publish('chat', new_message)
    except KeyboardInterrupt:
        break
print("Stopped")
thread.stop()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
methrafo.train <reference genomes><input MeDIP-Seq bigWig> <input Bisulfite-Seq bigWig> <output model prefix>
e.g.
methrafo.train hg19 example_MeDIP.bw example_Bisulfite.bw output_trained_model_prefix
"""
import pdb,sys,os
import gzip
from File import *
import re
import pyBigWig
from scipy.stats import pearsonr
from sklearn.ensemble import RandomForestRegressor
import math
import cPickle as pickle
#-----------------------------------------------------------------------
def fetchGenome(chrom_id, gref):
    """Load one chromosome from a gzipped FASTA file in ``gref``.

    Expects ``<gref>/<chrom_id>.fa.gz`` to contain a single ``>name`` header
    line followed by sequence lines.

    :param chrom_id: chromosome file stem, e.g. ``'chr1'``
    :param gref: directory holding the per-chromosome ``.fa.gz`` files
    :return: ``[name, sequence]`` with the sequence upper-cased
    """
    with gzip.open(os.path.join(gref, chrom_id + '.fa.gz'), 'rb') as f:
        raw = f.read()
    # BUG FIX: under Python 3 gzip.open('rb').read() returns bytes, so the
    # original str.split("\n") raised TypeError; decode first (no-op on Py2).
    if not isinstance(raw, str):
        raw = raw.decode('ascii')
    lf = raw.split("\n")
    chrom_id = lf[0].split('>')[1]    # actual name from the FASTA header
    chrom_seq = "".join(lf[1:])       # concatenate all sequence lines
    chrom_seq = chrom_seq.upper()
    return [chrom_id, chrom_seq]
def cgVector(chrom):
    """Return the start positions of all (non-overlapping) 'CG' dinucleotides.

    :param chrom: ``[name, sequence]`` pair as returned by ``fetchGenome``
    :return: list of 0-based start indices of each 'CG' occurrence
    """
    positions = []
    for match in re.finditer('CG', chrom[1]):
        positions.append(match.start())
    return positions
def scoreVector1(chrom, cgv, bwFile):
    """Per-CpG signal from a bigWig file, queried one position at a time.

    Slower alternative to ``scoreVector`` (one ``bw.stats`` call per CpG);
    kept for reference/debugging.

    :param chrom: ``[name, sequence]`` pair
    :param cgv: list of CpG start positions (from ``cgVector``)
    :param bwFile: path to the bigWig file
    :return: list of signal values, with missing data mapped to 0
    """
    bw = pyBigWig.open(bwFile)
    chrom_name = chrom[0]
    sv = []
    for i in cgv:
        si = bw.stats(chrom_name, i, i + 1)[0]
        # BUG FIX (idiom): compare against None with `is`, not `==`.
        si = 0 if si is None else si
        sv.append(si)
    return sv
def scoreVector(chrom, cgv, bwFile):
    """Per-CpG signal from a bigWig file using one bulk query.

    Fetches the whole chromosome's values once, then indexes the CpG
    positions; NaN (missing data) is mapped to 0.

    :param chrom: ``[name, sequence]`` pair
    :param cgv: list of CpG start positions (from ``cgVector``)
    :param bwFile: path to the bigWig file
    :return: list of signal values, one per entry of ``cgv``
    """
    bw = pyBigWig.open(bwFile)
    chrom_name = chrom[0]
    values = bw.values(chrom_name, 0, len(chrom[1]))
    result = []
    for pos in cgv:
        v = values[pos]
        result.append(0 if math.isnan(v) else v)
    return result
def nearbyCGVector(cgv, nearbycut):
    """For each CpG, list the indices of neighboring CpGs within a cutoff.

    Scans left and right of each CpG and collects indices whose genomic
    distance is <= ``nearbycut`` bp; the scan stops at the first CpG beyond
    the cutoff (positions in ``cgv`` are assumed sorted ascending).

    :param cgv: sorted list of CpG start positions
    :param nearbycut: maximum distance (bp) for a CpG to count as "nearby"
    :return: list (same length as ``cgv``) of lists of neighbor indices,
        left neighbors first (nearest to farthest), then right neighbors
    """
    nearcgs = []
    for i in range(len(cgv)):
        # scan left: BUG FIX - the original used `while (j > 0)`, which
        # silently skipped index 0, so the first CpG was never counted
        # as a left neighbor.
        leftcgs = []
        j = i - 1
        while j >= 0:
            if abs(cgv[j] - cgv[i]) > nearbycut:
                break
            leftcgs.append(j)
            j = j - 1
        # scan right
        rightcgs = []
        j = i + 1
        while j < len(cgv):
            if abs(cgv[j] - cgv[i]) > nearbycut:
                break
            rightcgs.append(j)
            j = j + 1
        nearcgs.append(leftcgs + rightcgs)
    return nearcgs
def nearbyCGScoreVector(chrom, bwFile, cgv, nearcgs):
    """Distance-weighted contribution of nearby CpGs' signal to each CpG.

    For each CpG i, sums ``k * score(j) / distance(i, j)`` over its neighbor
    indices ``nearcgs[i]``, so closer CpGs contribute more.

    :param chrom: ``[name, sequence]`` pair
    :param bwFile: path to the bigWig file
    :param cgv: list of CpG start positions
    :param nearcgs: neighbor index lists (from ``nearbyCGVector``)
    :return: list of aggregated neighbor scores, one per CpG
    """
    nearcgsS = []
    bw = pyBigWig.open(bwFile)
    chrom_name = chrom[0]
    k = 5  # distance weight parameter (empirical scaling factor)
    for i in range(len(nearcgs)):
        cgi = nearcgs[i]
        si = 0
        for j in cgi:
            dij = abs(cgv[j] - cgv[i])  # nonzero: j != i by construction
            sj = bw.stats(chrom_name, cgv[j], cgv[j] + 1)[0]
            # BUG FIX (idiom): compare against None with `is`, not `==`.
            sj = 0 if sj is None else sj
            si += (sj / dij) * k
        nearcgsS.append(si)
    return nearcgsS
#----------------------------------------------------------------------
def main():
    """Train a random-forest regressor mapping MeDIP-seq signal to
    bisulfite methylation levels.

    Reads CLI args: reference genome dir, MeDIP bigWig, bisulfite bigWig,
    output model prefix (see the module docstring). Features per CpG are
    ``[MeDIP score, number of nearby CpGs]``; the target is the bisulfite
    score. The fitted model is pickled to ``<output>.pkl``.
    """
    if len(sys.argv[1:]) != 4:
        print(__doc__)
        sys.exit(0)
    gref = sys.argv[1]       # reference genome directory (per-chromosome .fa.gz)
    bwFile = sys.argv[2]     # MeDIP-seq bigWig (feature source)
    bwBSFile = sys.argv[3]   # bisulfite bigWig (training target)
    output = sys.argv[4]     # output model prefix
    # NOTE: the original constructed this regressor twice; once is enough.
    rfregressor = RandomForestRegressor(random_state=0)
    chroms = os.listdir(gref)
    nearbycut = 90  # bp cutoff defining "nearby" CpGs
    # ----------------------------------------------------------------------
    F = []  # feature rows: [MeDIP score, nearby-CpG count]
    T = []  # targets: bisulfite scores
    print("training...")
    cut = 0.5  # train on the first half of each chromosome's CpGs
    for i in chroms:
        if i[0:3] == 'chr':
            iid = i.split('.')[0]
            try:
                chromi = fetchGenome(iid, gref)
                cgv = cgVector(chromi)
                sv = scoreVector(chromi, cgv, bwFile)
                nearcgs = nearbyCGVector(cgv, nearbycut)  # number of cgs nearby
                tsv = scoreVector(chromi, cgv, bwBSFile)
                FI = [[sv[j], len(nearcgs[j])] for j in range(len(cgv))]
                FIX = FI[:int(len(FI) * cut)]
                tsvX = tsv[:int(len(tsv) * cut)]
                F += FIX
                T += tsvX
                print(iid)
            except Exception as e:
                # BUG FIX: was a bare `except: pass`, which also swallowed
                # KeyboardInterrupt/SystemExit and hid all failures silently.
                # Keep the best-effort behavior but report skipped chromosomes.
                print("skipping %s: %s" % (iid, e))
    rfregressor.fit(F, T)
    # BUG FIX: pickle is a binary format - open the file in binary mode.
    with open(output + '.pkl', 'wb') as f:
        pickle.dump(rfregressor, f)
# Standard script entry-point guard.
if __name__=="__main__":
    main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Initialize the Hanabi PyQT5 Interface.
"""
from PyQt5 import QtCore, QtGui
from PyQt5.QtGui import QPalette
from PyQt5.QtWidgets import QMainWindow, QDesktopWidget, QApplication
from py_hanabi.interface.hanabi_window import HanabiWindow
from py_hanabi.interface.window import Window
__author__ = "Jakrin Juangbhanich"
__email__ = "juangbhanich.k@gmail.com"
class HanabiInterface(QMainWindow):
    """Top-level Qt main window for the Hanabi visualizer.

    Constructing an instance creates the QApplication, applies a dark
    'Fusion' palette, shows the game window and enters the Qt event loop -
    the constructor therefore blocks until the application quits.
    """

    def __init__(self):
        # The QApplication must exist before any widget is constructed.
        app = QApplication([])
        app.setStyle('Fusion')
        # Dark color scheme applied application-wide.
        palette = QPalette()
        palette.setColor(QPalette.Window, QtGui.QColor(53, 53, 53))
        palette.setColor(QPalette.WindowText, QtCore.Qt.white)
        palette.setColor(QPalette.Base, QtGui.QColor(15, 15, 15))
        palette.setColor(QPalette.AlternateBase, QtGui.QColor(53, 53, 53))
        palette.setColor(QPalette.ToolTipBase, QtCore.Qt.white)
        palette.setColor(QPalette.ToolTipText, QtCore.Qt.white)
        palette.setColor(QPalette.Text, QtCore.Qt.white)
        palette.setColor(QPalette.Button, QtGui.QColor(53, 53, 53))
        palette.setColor(QPalette.ButtonText, QtCore.Qt.white)
        palette.setColor(QPalette.BrightText, QtCore.Qt.red)
        palette.setColor(QPalette.Highlight, QtGui.QColor(0, 110, 200))
        palette.setColor(QPalette.HighlightedText, QtGui.QColor(255, 255, 255))
        app.setPalette(palette)
        super().__init__()
        self.setWindowTitle("Hanabi Visualizer")
        # Window currently being displayed; assigned in show_window().
        self.current_window: Window = None
        self.window_game: HanabiWindow = HanabiWindow()
        self.show_window(self.window_game)
        # Force a resize update.
        # NOTE(review): QTimer.singleShot is a static method, so the local
        # timer `t` is not actually needed to schedule the call; also
        # resizeEvent is invoked here with its default event=None - confirm
        # HanabiWindow.on_resize tolerates a None event.
        t = QtCore.QTimer()
        t.singleShot(0, self.resizeEvent)
        # Enter the Qt event loop; blocks until the application exits.
        app.exec()

    def show_window(self, window: Window):
        """Make ``window`` the active window, render it and center on screen."""
        self.current_window = window
        window.render(self)
        self.center_screen()

    def center_screen(self):
        """Move this window to the center of the available desktop area."""
        qt_rectangle = self.frameGeometry()
        center_point = QDesktopWidget().availableGeometry().center()
        qt_rectangle.moveCenter(center_point)
        self.move(qt_rectangle.topLeft())

    def resizeEvent(self, event=None):
        # Forward Qt resize events to the active window's own handler.
        self.current_window.on_resize(event)
|
nilq/baby-python
|
python
|
# Reference: https://github.com/zhangchuheng123/Reinforcement-Implementation/blob/master/code/ppo.py
import torch
def ppo_step(
    policy_net,
    value_net,
    optimizer_policy,
    optimizer_value,
    optim_value_iter_num,
    states,
    actions,
    returns,
    advantages,
    fixed_log_probs,
    clip_epsilon,
    l2_reg,
    max_grad_norm=40,
):
    """Update the critic and policy networks with one PPO step (first-order).

    Args:
        policy_net: policy network; must expose ``get_log_prob(states, actions)``
            and ``parameters()``
        value_net: critic value network (callable on ``states``)
        optimizer_policy: optimizer for the policy network (e.g. Adam)
        optimizer_value: optimizer for the critic network (e.g. Adam)
        optim_value_iter_num: number of critic optimization iterations
        states: batch of states
        actions: batch of actions
        returns: empirical return targets for the critic
        advantages: estimated advantage values
        fixed_log_probs: log probabilities under the old (pre-update) policy
        clip_epsilon: PPO clipping range for the probability ratio
        l2_reg: L2 regularization coefficient for the critic weights
        max_grad_norm: max gradient norm for clipping the policy gradients
            (default 40, matching the reference implementation)
    """
    # --- Critic update: minimize MSE between predicted values and returns ---
    for _ in range(optim_value_iter_num):
        values_pred = value_net(states)
        value_loss = (values_pred - returns).pow(2).mean()  # MSE loss
        # Weight decay via explicit L2 regularization on critic parameters.
        for param in value_net.parameters():
            value_loss += param.pow(2).sum() * l2_reg
        optimizer_value.zero_grad()  # reset accumulated gradients
        value_loss.backward()
        optimizer_value.step()

    # --- Policy update: clipped surrogate objective (PPO-Clip) ---
    log_probs = policy_net.get_log_prob(states, actions)
    # Probability ratio between new and old policy.
    ratio = torch.exp(log_probs - fixed_log_probs)
    surr1 = ratio * advantages
    surr2 = torch.clamp(ratio, 1.0 - clip_epsilon, 1.0 + clip_epsilon) * advantages
    # Pessimistic (min) surrogate, negated because optimizers minimize.
    policy_surr = -torch.min(surr1, surr2).mean()
    optimizer_policy.zero_grad()  # reset accumulated gradients
    policy_surr.backward()
    # Clip the gradient norm to stabilize the policy update.
    torch.nn.utils.clip_grad_norm_(policy_net.parameters(), max_grad_norm)
    optimizer_policy.step()
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module SL81-STD-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SL81-STD-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:57:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, IpAddress, NotificationType, iso, ModuleIdentity, enterprises, Gauge32, TimeTicks, Counter32, MibIdentifier, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Bits, ObjectName, NotificationType, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "IpAddress", "NotificationType", "iso", "ModuleIdentity", "enterprises", "Gauge32", "TimeTicks", "Counter32", "MibIdentifier", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Bits", "ObjectName", "NotificationType", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
omnitronix = MibIdentifier((1, 3, 6, 1, 4, 1, 3052))
sl81 = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5))
status = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 1))
config = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2))
productIds = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 3))
techSupport = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 99))
eventSensorStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1))
dataEventStatus = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 1, 2))
eventSensorBasics = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1))
dataEventConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2))
serialPorts = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3))
network = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2, 4))
modem = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2, 5))
snmp = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2, 6))
pagers = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7))
time = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2, 8))
timeouts = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 2, 9))
esPointTable = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1), )
if mibBuilder.loadTexts: esPointTable.setStatus('mandatory')
esPointEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1), ).setIndexNames((0, "SL81-STD-MIB", "esIndexES"), (0, "SL81-STD-MIB", "esIndexPC"), (0, "SL81-STD-MIB", "esIndexPoint"))
if mibBuilder.loadTexts: esPointEntry.setStatus('mandatory')
esIndexES = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esIndexES.setStatus('mandatory')
esIndexPC = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esIndexPC.setStatus('mandatory')
esIndexPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esIndexPoint.setStatus('mandatory')
esPointName = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: esPointName.setStatus('mandatory')
esPointInEventState = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: esPointInEventState.setStatus('mandatory')
esPointValueInt = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: esPointValueInt.setStatus('mandatory')
esPointValueStr = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esPointValueStr.setStatus('mandatory')
esPointTimeLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esPointTimeLastChange.setStatus('mandatory')
esPointTimetickLastChange = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 1, 1, 1, 9), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esPointTimetickLastChange.setStatus('mandatory')
deStatusTable = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 1, 2, 1), )
if mibBuilder.loadTexts: deStatusTable.setStatus('mandatory')
deStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 1, 2, 1, 1), ).setIndexNames((0, "SL81-STD-MIB", "deStatusIndex"))
if mibBuilder.loadTexts: deStatusEntry.setStatus('mandatory')
deStatusIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: deStatusIndex.setStatus('mandatory')
deStatusName = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: deStatusName.setStatus('mandatory')
deStatusCounter = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 2, 1, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: deStatusCounter.setStatus('mandatory')
deStatusThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 2, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: deStatusThreshold.setStatus('mandatory')
deStatusLastTriggerTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 2, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: deStatusLastTriggerTime.setStatus('mandatory')
deStatusLastTriggerData = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 1, 2, 1, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: deStatusLastTriggerData.setStatus('mandatory')
esNumberEventSensors = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esNumberEventSensors.setStatus('mandatory')
esTable = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2), )
if mibBuilder.loadTexts: esTable.setStatus('mandatory')
esEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1), ).setIndexNames((0, "SL81-STD-MIB", "esIndex"))
if mibBuilder.loadTexts: esEntry.setStatus('mandatory')
esIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esIndex.setStatus('mandatory')
esName = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esName.setStatus('mandatory')
esID = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esID.setStatus('mandatory')
esNumberTempSensors = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esNumberTempSensors.setStatus('mandatory')
esTempReportingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esTempReportingMode.setStatus('mandatory')
esNumberCCs = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esNumberCCs.setStatus('mandatory')
esCCReportingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esCCReportingMode.setStatus('mandatory')
esNumberHumidSensors = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esNumberHumidSensors.setStatus('mandatory')
esHumidReportingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 9), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esHumidReportingMode.setStatus('mandatory')
esNumberNoiseSensors = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esNumberNoiseSensors.setStatus('mandatory')
esNoiseReportingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esNoiseReportingMode.setStatus('mandatory')
esNumberAirflowSensors = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esNumberAirflowSensors.setStatus('mandatory')
esAirflowReportingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esAirflowReportingMode.setStatus('mandatory')
esNumberAnalog = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esNumberAnalog.setStatus('mandatory')
esAnalogReportingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 15), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esAnalogReportingMode.setStatus('mandatory')
esNumberRelayOutputs = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 16), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esNumberRelayOutputs.setStatus('mandatory')
esRelayReportingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 1, 2, 1, 17), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: esRelayReportingMode.setStatus('mandatory')
deFieldTable = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 1), )
if mibBuilder.loadTexts: deFieldTable.setStatus('mandatory')
deFieldEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 1, 1), ).setIndexNames((0, "SL81-STD-MIB", "deFieldIndex"))
if mibBuilder.loadTexts: deFieldEntry.setStatus('mandatory')
deFieldIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: deFieldIndex.setStatus('mandatory')
deFieldStart = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 1, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deFieldStart.setStatus('mandatory')
deFieldLength = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 1, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deFieldLength.setStatus('mandatory')
deFieldName = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 1, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deFieldName.setStatus('mandatory')
deConfigTable = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2), )
if mibBuilder.loadTexts: deConfigTable.setStatus('mandatory')
deConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1), ).setIndexNames((0, "SL81-STD-MIB", "deConfigIndex"))
if mibBuilder.loadTexts: deConfigEntry.setStatus('mandatory')
deConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: deConfigIndex.setStatus('mandatory')
deConfigEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigEnabled.setStatus('mandatory')
deConfigName = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigName.setStatus('mandatory')
deConfigEquation = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigEquation.setStatus('mandatory')
deConfigThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigThreshold.setStatus('mandatory')
deConfigClearMode = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigClearMode.setStatus('mandatory')
deConfigClearTime = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 7), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigClearTime.setStatus('mandatory')
deConfigAutoClear = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigAutoClear.setStatus('mandatory')
deConfigActions = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 9), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigActions.setStatus('mandatory')
deConfigTrapNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 10), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigTrapNumber.setStatus('mandatory')
deConfigClass = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 2, 2, 1, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: deConfigClass.setStatus('mandatory')
numberPorts = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: numberPorts.setStatus('mandatory')
portConfigTable = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2), )
if mibBuilder.loadTexts: portConfigTable.setStatus('mandatory')
portConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2, 1), ).setIndexNames((0, "SL81-STD-MIB", "portConfigIndex"))
if mibBuilder.loadTexts: portConfigEntry.setStatus('mandatory')
portConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portConfigIndex.setStatus('mandatory')
portConfigBaud = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portConfigBaud.setStatus('mandatory')
portConfigDataFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2, 1, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portConfigDataFormat.setStatus('mandatory')
portConfigStripPtOutputLfs = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2, 1, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portConfigStripPtOutputLfs.setStatus('mandatory')
portConfigStripPtInputLfs = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portConfigStripPtInputLfs.setStatus('mandatory')
portConfigDTRLowIdle = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portConfigDTRLowIdle.setStatus('mandatory')
portConfigMaskEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2, 1, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portConfigMaskEnable.setStatus('mandatory')
portConfigDAEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 3, 2, 1, 8), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: portConfigDAEnable.setStatus('mandatory')
ipConfigStatic = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 4, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ipConfigStatic.setStatus('mandatory')
ipConfigAddress = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 4, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipConfigAddress.setStatus('mandatory')
ipConfigSubnetMask = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 4, 3), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipConfigSubnetMask.setStatus('mandatory')
ipConfigDefaultRouter = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 4, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipConfigDefaultRouter.setStatus('mandatory')
ipConfigEngage = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 4, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ipConfigEngage.setStatus('mandatory')
telnetDuplex = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 4, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: telnetDuplex.setStatus('mandatory')
modemDataFormat = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 5, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: modemDataFormat.setStatus('mandatory')
modemUserSetup = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 5, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: modemUserSetup.setStatus('mandatory')
modemTAPSetup = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 5, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: modemTAPSetup.setStatus('mandatory')
modemTimeBetweenOutbound = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 5, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: modemTimeBetweenOutbound.setStatus('mandatory')
smTable = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 2, 6, 1), )
if mibBuilder.loadTexts: smTable.setStatus('mandatory')
smEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 2, 6, 1, 1), ).setIndexNames((0, "SL81-STD-MIB", "smIndex"))
if mibBuilder.loadTexts: smEntry.setStatus('mandatory')
smIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 6, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: smIndex.setStatus('mandatory')
smAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 6, 1, 1, 2), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: smAddress.setStatus('mandatory')
pagerRetries = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pagerRetries.setStatus('mandatory')
pagerTable = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7, 2), )
if mibBuilder.loadTexts: pagerTable.setStatus('mandatory')
pagerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7, 2, 1), ).setIndexNames((0, "SL81-STD-MIB", "pagerIndex"))
if mibBuilder.loadTexts: pagerEntry.setStatus('mandatory')
pagerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: pagerIndex.setStatus('mandatory')
pagerType = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7, 2, 1, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pagerType.setStatus('mandatory')
pagerPhoneNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7, 2, 1, 3), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pagerPhoneNumber.setStatus('mandatory')
pagerID = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7, 2, 1, 4), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pagerID.setStatus('mandatory')
pagerPostCalloutDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7, 2, 1, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pagerPostCalloutDelay.setStatus('mandatory')
pagerIDDelay = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 2, 7, 2, 1, 6), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: pagerIDDelay.setStatus('mandatory')
clock = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 8, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: clock.setStatus('mandatory')
autoDSTAdjust = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 8, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: autoDSTAdjust.setStatus('mandatory')
commandTimeout = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 9, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: commandTimeout.setStatus('mandatory')
passthroughTimeout = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 2, 9, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: passthroughTimeout.setStatus('mandatory')
siteID = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 3, 1), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: siteID.setStatus('mandatory')
thisProduct = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 3, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: thisProduct.setStatus('mandatory')
stockTrapString = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 3, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: stockTrapString.setStatus('mandatory')
trapEventTypeNumber = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 3, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapEventTypeNumber.setStatus('mandatory')
trapEventTypeName = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 3, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapEventTypeName.setStatus('mandatory')
trapIncludedValue = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 3, 6), Integer32().subtype(subtypeSpec=ValueRangeConstraint(-32768, 32767))).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapIncludedValue.setStatus('mandatory')
trapIncludedString = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 3, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: trapIncludedString.setStatus('mandatory')
trapEventClassNumber = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 3, 9), Integer32())
if mibBuilder.loadTexts: trapEventClassNumber.setStatus('mandatory')
trapEventClassName = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 3, 10), Integer32())
if mibBuilder.loadTexts: trapEventClassName.setStatus('mandatory')
techSupport1 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport1.setStatus('mandatory')
techSupport2 = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 99, 2))
techSupport2n1 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 2, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport2n1.setStatus('mandatory')
techSupport2n2 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 2, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport2n2.setStatus('mandatory')
techSupport3 = MibIdentifier((1, 3, 6, 1, 4, 1, 3052, 5, 99, 3))
techSupport3n1 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 3, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport3n1.setStatus('mandatory')
techSupport3n2 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 3, 2), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport3n2.setStatus('mandatory')
techSupport3n3 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 3, 3), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport3n3.setStatus('mandatory')
techSupport3n4 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 3, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport3n4.setStatus('mandatory')
techSupport3n5 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 3, 5), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport3n5.setStatus('mandatory')
techSupport4 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 4), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport4.setStatus('mandatory')
techSupport7 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 7), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport7.setStatus('mandatory')
techSupport9 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 9), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport9.setStatus('mandatory')
techSupport10 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 10), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport10.setStatus('mandatory')
techSupport11 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 11), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport11.setStatus('mandatory')
techSupport16 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 16), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport16.setStatus('mandatory')
techSupport17 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 17), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport17.setStatus('mandatory')
techSupport18 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 18), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport18.setStatus('mandatory')
techSupport19 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 19), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport19.setStatus('mandatory')
techSupport20Table = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 99, 20), )
if mibBuilder.loadTexts: techSupport20Table.setStatus('mandatory')
techSupport20Entry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 99, 20, 1), ).setIndexNames((0, "SL81-STD-MIB", "techSupport20Index"))
if mibBuilder.loadTexts: techSupport20Entry.setStatus('mandatory')
techSupport20Index = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 99, 20, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: techSupport20Index.setStatus('mandatory')
techSupport20 = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 99, 20, 1, 2), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport20.setStatus('mandatory')
techSupport21Table = MibTable((1, 3, 6, 1, 4, 1, 3052, 5, 99, 21), )
if mibBuilder.loadTexts: techSupport21Table.setStatus('mandatory')
techSupport21Entry = MibTableRow((1, 3, 6, 1, 4, 1, 3052, 5, 99, 21, 1), ).setIndexNames((0, "SL81-STD-MIB", "techSupport21Index"))
if mibBuilder.loadTexts: techSupport21Entry.setStatus('mandatory')
techSupport21Index = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 99, 21, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: techSupport21Index.setStatus('mandatory')
techSupport21 = MibTableColumn((1, 3, 6, 1, 4, 1, 3052, 5, 99, 21, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: techSupport21.setStatus('mandatory')
techSupport22 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 22), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport22.setStatus('mandatory')
techSupport24 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 24), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport24.setStatus('mandatory')
techSupport25 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 25), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport25.setStatus('mandatory')
techSupport26 = MibScalar((1, 3, 6, 1, 4, 1, 3052, 5, 99, 26), DisplayString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: techSupport26.setStatus('mandatory')
sl81TestTrap = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "stockTrapString"))
sl81StockESDisconnectTrap = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,50)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "stockTrapString"))
sl81StockDataEventTrap = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,100)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "stockTrapString"))
sl81StockContactClosureTrap = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,110)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "stockTrapString"))
sl81StockTempTrap = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,120)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "stockTrapString"))
sl81StockHumidityTrap = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,130)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "stockTrapString"))
sl81StockAnalogTrap = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,140)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "stockTrapString"))
sl81StockCTSTrap = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,160)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "stockTrapString"))
sl81StockSchedTrap = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,170)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "stockTrapString"))
sl81UserTrap1000 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1000)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
# User traps 1001-1069 are structurally identical: same enterprise OID prefix
# (1.3.6.1.4.1.3052.5), trap sub-id (0, N), and the same 13 varbind objects.
# Build them in a loop and install each under its generated module-level name
# (sl81UserTrap1001 .. sl81UserTrap1069), exactly as the unrolled definitions did.
_SL81_USER_TRAP_OBJECTS = (
    ("SL81-STD-MIB", "siteID"),
    ("SL81-STD-MIB", "esIndex"),
    ("SL81-STD-MIB", "esName"),
    ("SL81-STD-MIB", "trapEventTypeNumber"),
    ("SL81-STD-MIB", "trapEventTypeName"),
    ("SL81-STD-MIB", "esIndexPoint"),
    ("SL81-STD-MIB", "esPointName"),
    ("SL81-STD-MIB", "esID"),
    ("SL81-STD-MIB", "clock"),
    ("SL81-STD-MIB", "trapIncludedValue"),
    ("SL81-STD-MIB", "trapIncludedString"),
    ("SL81-STD-MIB", "trapEventClassNumber"),
    ("SL81-STD-MIB", "trapEventClassName"),
)
for _trap_num in range(1001, 1070):
    globals()['sl81UserTrap%d' % _trap_num] = NotificationType(
        (1, 3, 6, 1, 4, 1, 3052, 5) + (0, _trap_num)
    ).setObjects(*_SL81_USER_TRAP_OBJECTS)
sl81UserTrap1070 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1070)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1071 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1071)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1072 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1072)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1073 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1073)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1074 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1074)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1075 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1075)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1076 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1076)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1077 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1077)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1078 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1078)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1079 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1079)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1080 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1080)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1081 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1081)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1082 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1082)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1083 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1083)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1084 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1084)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1085 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1085)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1086 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1086)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1087 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1087)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1088 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1088)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1089 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1089)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1090 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1090)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1091 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1091)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1092 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1092)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1093 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1093)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1094 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1094)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1095 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1095)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1096 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1096)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1097 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1097)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1098 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1098)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1099 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1099)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1100 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1100)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1101 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1101)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1102 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1102)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1103 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1103)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1104 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1104)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1105 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1105)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1106 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1106)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1107 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1107)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1108 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1108)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1109 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1109)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1110 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1110)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1111 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1111)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1112 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1112)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1113 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1113)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1114 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1114)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1115 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1115)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1116 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1116)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1117 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1117)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1118 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1118)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1119 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1119)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1120 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1120)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1121 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1121)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1122 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1122)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
# SL81-STD-MIB user traps 1123-1191.
#
# Every one of these notifications shares the same enterprise OID prefix
# (1.3.6.1.4.1.3052.5) with suffix (0, <trap-number>) and carries the same
# ordered varbind list, so they are generated in a loop rather than written
# out as 69 near-identical lines.  The module-level names bound here
# (sl81UserTrap1123 ... sl81UserTrap1191) and the objects they reference are
# identical to the fully expanded form.

# Varbinds attached to every sl81UserTrap* notification, in wire order.
_SL81_USER_TRAP_OBJECTS = (
    ("SL81-STD-MIB", "siteID"),
    ("SL81-STD-MIB", "esIndex"),
    ("SL81-STD-MIB", "esName"),
    ("SL81-STD-MIB", "trapEventTypeNumber"),
    ("SL81-STD-MIB", "trapEventTypeName"),
    ("SL81-STD-MIB", "esIndexPoint"),
    ("SL81-STD-MIB", "esPointName"),
    ("SL81-STD-MIB", "esID"),
    ("SL81-STD-MIB", "clock"),
    ("SL81-STD-MIB", "trapIncludedValue"),
    ("SL81-STD-MIB", "trapIncludedString"),
    ("SL81-STD-MIB", "trapEventClassNumber"),
    ("SL81-STD-MIB", "trapEventClassName"),
)

for _trap_number in range(1123, 1192):
    # Bind e.g. sl81UserTrap1123 at module level, exactly as the expanded
    # per-line definitions did, so later exportSymbols()/imports still work.
    globals()["sl81UserTrap%d" % _trap_number] = NotificationType(
        (1, 3, 6, 1, 4, 1, 3052, 5) + (0, _trap_number)
    ).setObjects(*_SL81_USER_TRAP_OBJECTS)
del _trap_number
sl81UserTrap1192 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1192)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1193 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1193)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1194 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1194)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1195 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1195)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1196 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1196)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1197 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1197)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1198 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1198)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
sl81UserTrap1199 = NotificationType((1, 3, 6, 1, 4, 1, 3052, 5) + (0,1199)).setObjects(("SL81-STD-MIB", "siteID"), ("SL81-STD-MIB", "esIndex"), ("SL81-STD-MIB", "esName"), ("SL81-STD-MIB", "trapEventTypeNumber"), ("SL81-STD-MIB", "trapEventTypeName"), ("SL81-STD-MIB", "esIndexPoint"), ("SL81-STD-MIB", "esPointName"), ("SL81-STD-MIB", "esID"), ("SL81-STD-MIB", "clock"), ("SL81-STD-MIB", "trapIncludedValue"), ("SL81-STD-MIB", "trapIncludedString"), ("SL81-STD-MIB", "trapEventClassNumber"), ("SL81-STD-MIB", "trapEventClassName"))
# Register every symbol defined by this generated MIB module with the MIB
# builder so other modules can import them; the registration is split across
# two exportSymbols calls because of the sheer number of keyword arguments.
mibBuilder.exportSymbols("SL81-STD-MIB", sl81UserTrap1121=sl81UserTrap1121, sl81UserTrap1140=sl81UserTrap1140, sl81UserTrap1171=sl81UserTrap1171, sl81UserTrap1192=sl81UserTrap1192, techSupport3=techSupport3, trapIncludedValue=trapIncludedValue, sl81UserTrap1191=sl81UserTrap1191, smTable=smTable, trapEventTypeName=trapEventTypeName, sl81UserTrap1024=sl81UserTrap1024, ipConfigAddress=ipConfigAddress, sl81StockContactClosureTrap=sl81StockContactClosureTrap, clock=clock, sl81UserTrap1103=sl81UserTrap1103, sl81UserTrap1048=sl81UserTrap1048, sl81UserTrap1097=sl81UserTrap1097, sl81UserTrap1059=sl81UserTrap1059, sl81UserTrap1198=sl81UserTrap1198, sl81UserTrap1100=sl81UserTrap1100, smEntry=smEntry, sl81UserTrap1039=sl81UserTrap1039, deConfigTrapNumber=deConfigTrapNumber, sl81UserTrap1108=sl81UserTrap1108, sl81UserTrap1117=sl81UserTrap1117, techSupport11=techSupport11, techSupport4=techSupport4, modemTimeBetweenOutbound=modemTimeBetweenOutbound, sl81UserTrap1138=sl81UserTrap1138, sl81UserTrap1170=sl81UserTrap1170, sl81UserTrap1023=sl81UserTrap1023, sl81UserTrap1007=sl81UserTrap1007, deConfigClearTime=deConfigClearTime, esCCReportingMode=esCCReportingMode, techSupport21Table=techSupport21Table, sl81UserTrap1053=sl81UserTrap1053, esNumberRelayOutputs=esNumberRelayOutputs, portConfigStripPtInputLfs=portConfigStripPtInputLfs, sl81UserTrap1173=sl81UserTrap1173, sl81UserTrap1081=sl81UserTrap1081, sl81UserTrap1144=sl81UserTrap1144, sl81UserTrap1185=sl81UserTrap1185, trapEventClassName=trapEventClassName, deConfigName=deConfigName, techSupport3n4=techSupport3n4, productIds=productIds, portConfigTable=portConfigTable, techSupport26=techSupport26, sl81UserTrap1153=sl81UserTrap1153, sl81UserTrap1042=sl81UserTrap1042, pagerType=pagerType, sl81UserTrap1037=sl81UserTrap1037, sl81UserTrap1118=sl81UserTrap1118, techSupport9=techSupport9, sl81UserTrap1186=sl81UserTrap1186, numberPorts=numberPorts, sl81UserTrap1182=sl81UserTrap1182, modemDataFormat=modemDataFormat, 
esNoiseReportingMode=esNoiseReportingMode, techSupport3n2=techSupport3n2, modemTAPSetup=modemTAPSetup, sl81UserTrap1113=sl81UserTrap1113, sl81UserTrap1165=sl81UserTrap1165, sl81UserTrap1107=sl81UserTrap1107, sl81StockCTSTrap=sl81StockCTSTrap, techSupport=techSupport, sl81UserTrap1096=sl81UserTrap1096, sl81UserTrap1004=sl81UserTrap1004, sl81UserTrap1065=sl81UserTrap1065, deConfigEnabled=deConfigEnabled, sl81UserTrap1000=sl81UserTrap1000, sl81UserTrap1141=sl81UserTrap1141, sl81UserTrap1102=sl81UserTrap1102, stockTrapString=stockTrapString, techSupport20Table=techSupport20Table, sl81UserTrap1074=sl81UserTrap1074, sl81UserTrap1086=sl81UserTrap1086, esTable=esTable, portConfigDAEnable=portConfigDAEnable, deFieldTable=deFieldTable, telnetDuplex=telnetDuplex, techSupport10=techSupport10, sl81UserTrap1050=sl81UserTrap1050, smAddress=smAddress, sl81UserTrap1146=sl81UserTrap1146, sl81UserTrap1106=sl81UserTrap1106, sl81UserTrap1062=sl81UserTrap1062, portConfigMaskEnable=portConfigMaskEnable, techSupport21=techSupport21, deFieldLength=deFieldLength, sl81UserTrap1087=sl81UserTrap1087, sl81UserTrap1003=sl81UserTrap1003, esPointName=esPointName, sl81UserTrap1045=sl81UserTrap1045, deFieldEntry=deFieldEntry, techSupport20Entry=techSupport20Entry, sl81UserTrap1012=sl81UserTrap1012, eventSensorStatus=eventSensorStatus, esIndexPoint=esIndexPoint, sl81StockTempTrap=sl81StockTempTrap, sl81UserTrap1068=sl81UserTrap1068, deConfigAutoClear=deConfigAutoClear, esPointTimetickLastChange=esPointTimetickLastChange, sl81UserTrap1030=sl81UserTrap1030, sl81UserTrap1047=sl81UserTrap1047, deStatusTable=deStatusTable, sl81UserTrap1018=sl81UserTrap1018, sl81UserTrap1126=sl81UserTrap1126, sl81UserTrap1008=sl81UserTrap1008, esNumberAirflowSensors=esNumberAirflowSensors, sl81UserTrap1026=sl81UserTrap1026, deConfigClass=deConfigClass, techSupport20=techSupport20, sl81UserTrap1070=sl81UserTrap1070, sl81UserTrap1163=sl81UserTrap1163, sl81UserTrap1172=sl81UserTrap1172, 
esAnalogReportingMode=esAnalogReportingMode, sl81UserTrap1021=sl81UserTrap1021, esNumberCCs=esNumberCCs, sl81StockSchedTrap=sl81StockSchedTrap, sl81UserTrap1084=sl81UserTrap1084, sl81UserTrap1199=sl81UserTrap1199, sl81UserTrap1079=sl81UserTrap1079, sl81UserTrap1178=sl81UserTrap1178, deConfigThreshold=deConfigThreshold, deConfigActions=deConfigActions, sl81UserTrap1005=sl81UserTrap1005, sl81UserTrap1128=sl81UserTrap1128, sl81UserTrap1032=sl81UserTrap1032, sl81UserTrap1188=sl81UserTrap1188, sl81UserTrap1058=sl81UserTrap1058, sl81UserTrap1089=sl81UserTrap1089, sl81UserTrap1035=sl81UserTrap1035, deStatusName=deStatusName, sl81=sl81, esIndexPC=esIndexPC, sl81UserTrap1054=sl81UserTrap1054, sl81UserTrap1161=sl81UserTrap1161, ipConfigEngage=ipConfigEngage, sl81StockHumidityTrap=sl81StockHumidityTrap, sl81UserTrap1189=sl81UserTrap1189, sl81UserTrap1130=sl81UserTrap1130, sl81UserTrap1181=sl81UserTrap1181, sl81UserTrap1093=sl81UserTrap1093, sl81UserTrap1190=sl81UserTrap1190, portConfigEntry=portConfigEntry, sl81UserTrap1111=sl81UserTrap1111, sl81UserTrap1052=sl81UserTrap1052, timeouts=timeouts, esNumberAnalog=esNumberAnalog, sl81UserTrap1112=sl81UserTrap1112, esPointValueStr=esPointValueStr, sl81UserTrap1043=sl81UserTrap1043, sl81UserTrap1080=sl81UserTrap1080, sl81UserTrap1193=sl81UserTrap1193, esName=esName, sl81UserTrap1049=sl81UserTrap1049, sl81UserTrap1099=sl81UserTrap1099, sl81UserTrap1197=sl81UserTrap1197, sl81UserTrap1028=sl81UserTrap1028, sl81UserTrap1041=sl81UserTrap1041, sl81UserTrap1092=sl81UserTrap1092, techSupport3n5=techSupport3n5, sl81UserTrap1009=sl81UserTrap1009, sl81UserTrap1011=sl81UserTrap1011, sl81UserTrap1044=sl81UserTrap1044, sl81UserTrap1077=sl81UserTrap1077, sl81UserTrap1157=sl81UserTrap1157, deFieldIndex=deFieldIndex, esPointEntry=esPointEntry, sl81UserTrap1110=sl81UserTrap1110, sl81UserTrap1057=sl81UserTrap1057, sl81UserTrap1162=sl81UserTrap1162, sl81UserTrap1147=sl81UserTrap1147, techSupport25=techSupport25, trapEventTypeNumber=trapEventTypeNumber, 
sl81UserTrap1027=sl81UserTrap1027, sl81UserTrap1114=sl81UserTrap1114, sl81UserTrap1169=sl81UserTrap1169, sl81UserTrap1071=sl81UserTrap1071, autoDSTAdjust=autoDSTAdjust, sl81UserTrap1015=sl81UserTrap1015, techSupport19=techSupport19, sl81UserTrap1075=sl81UserTrap1075, sl81UserTrap1179=sl81UserTrap1179, sl81UserTrap1139=sl81UserTrap1139, sl81UserTrap1196=sl81UserTrap1196, esIndex=esIndex, sl81StockDataEventTrap=sl81StockDataEventTrap, pagerIndex=pagerIndex, sl81UserTrap1167=sl81UserTrap1167, pagerID=pagerID, sl81UserTrap1104=sl81UserTrap1104, sl81StockESDisconnectTrap=sl81StockESDisconnectTrap, sl81UserTrap1175=sl81UserTrap1175, esNumberNoiseSensors=esNumberNoiseSensors, time=time, thisProduct=thisProduct, sl81UserTrap1133=sl81UserTrap1133, dataEventStatus=dataEventStatus, sl81UserTrap1014=sl81UserTrap1014, sl81UserTrap1055=sl81UserTrap1055, deStatusLastTriggerTime=deStatusLastTriggerTime, sl81UserTrap1115=sl81UserTrap1115, deConfigTable=deConfigTable, passthroughTimeout=passthroughTimeout, sl81UserTrap1046=sl81UserTrap1046, sl81UserTrap1040=sl81UserTrap1040, sl81UserTrap1051=sl81UserTrap1051, sl81UserTrap1083=sl81UserTrap1083, sl81UserTrap1152=sl81UserTrap1152, sl81UserTrap1116=sl81UserTrap1116, esIndexES=esIndexES, sl81UserTrap1119=sl81UserTrap1119, dataEventConfig=dataEventConfig, portConfigIndex=portConfigIndex, pagerEntry=pagerEntry, sl81UserTrap1149=sl81UserTrap1149, sl81UserTrap1038=sl81UserTrap1038, techSupport17=techSupport17, sl81UserTrap1176=sl81UserTrap1176, techSupport21Entry=techSupport21Entry, sl81UserTrap1131=sl81UserTrap1131, sl81UserTrap1061=sl81UserTrap1061, sl81UserTrap1002=sl81UserTrap1002, sl81UserTrap1124=sl81UserTrap1124, techSupport16=techSupport16, portConfigStripPtOutputLfs=portConfigStripPtOutputLfs, sl81UserTrap1109=sl81UserTrap1109, esPointTable=esPointTable, siteID=siteID, sl81UserTrap1166=sl81UserTrap1166, sl81UserTrap1064=sl81UserTrap1064, techSupport3n1=techSupport3n1, sl81UserTrap1136=sl81UserTrap1136, 
sl81UserTrap1125=sl81UserTrap1125, sl81UserTrap1066=sl81UserTrap1066, esAirflowReportingMode=esAirflowReportingMode, sl81UserTrap1019=sl81UserTrap1019, sl81UserTrap1036=sl81UserTrap1036, techSupport18=techSupport18, sl81UserTrap1127=sl81UserTrap1127, portConfigDataFormat=portConfigDataFormat, trapEventClassNumber=trapEventClassNumber, sl81UserTrap1184=sl81UserTrap1184, esHumidReportingMode=esHumidReportingMode, pagerPostCalloutDelay=pagerPostCalloutDelay, sl81UserTrap1159=sl81UserTrap1159, sl81UserTrap1123=sl81UserTrap1123, smIndex=smIndex, techSupport20Index=techSupport20Index, techSupport7=techSupport7, deStatusThreshold=deStatusThreshold, techSupport22=techSupport22, sl81UserTrap1158=sl81UserTrap1158)
# Second batch of symbol registrations for the same generated module.
mibBuilder.exportSymbols("SL81-STD-MIB", sl81UserTrap1073=sl81UserTrap1073, portConfigBaud=portConfigBaud, sl81UserTrap1120=sl81UserTrap1120, ipConfigSubnetMask=ipConfigSubnetMask, deFieldName=deFieldName, sl81UserTrap1164=sl81UserTrap1164, sl81UserTrap1006=sl81UserTrap1006, sl81UserTrap1017=sl81UserTrap1017, sl81TestTrap=sl81TestTrap, sl81UserTrap1105=sl81UserTrap1105, sl81UserTrap1088=sl81UserTrap1088, sl81UserTrap1098=sl81UserTrap1098, ipConfigStatic=ipConfigStatic, pagerRetries=pagerRetries, sl81UserTrap1129=sl81UserTrap1129, sl81StockAnalogTrap=sl81StockAnalogTrap, techSupport2n2=techSupport2n2, sl81UserTrap1063=sl81UserTrap1063, deConfigEquation=deConfigEquation, sl81UserTrap1091=sl81UserTrap1091, sl81UserTrap1187=sl81UserTrap1187, sl81UserTrap1025=sl81UserTrap1025, sl81UserTrap1010=sl81UserTrap1010, config=config, sl81UserTrap1067=sl81UserTrap1067, deConfigEntry=deConfigEntry, portConfigDTRLowIdle=portConfigDTRLowIdle, sl81UserTrap1160=sl81UserTrap1160, pagerIDDelay=pagerIDDelay, sl81UserTrap1132=sl81UserTrap1132, modem=modem, sl81UserTrap1033=sl81UserTrap1033, sl81UserTrap1148=sl81UserTrap1148, techSupport3n3=techSupport3n3, sl81UserTrap1029=sl81UserTrap1029, sl81UserTrap1150=sl81UserTrap1150, sl81UserTrap1069=sl81UserTrap1069, sl81UserTrap1194=sl81UserTrap1194, sl81UserTrap1134=sl81UserTrap1134, sl81UserTrap1122=sl81UserTrap1122, techSupport24=techSupport24, sl81UserTrap1056=sl81UserTrap1056, esID=esID, sl81UserTrap1151=sl81UserTrap1151, sl81UserTrap1082=sl81UserTrap1082, techSupport1=techSupport1, snmp=snmp, deConfigClearMode=deConfigClearMode, sl81UserTrap1143=sl81UserTrap1143, sl81UserTrap1078=sl81UserTrap1078, pagers=pagers, network=network, ipConfigDefaultRouter=ipConfigDefaultRouter, sl81UserTrap1177=sl81UserTrap1177, sl81UserTrap1174=sl81UserTrap1174, sl81UserTrap1060=sl81UserTrap1060, deStatusIndex=deStatusIndex, sl81UserTrap1156=sl81UserTrap1156, commandTimeout=commandTimeout, deStatusEntry=deStatusEntry, sl81UserTrap1031=sl81UserTrap1031, 
sl81UserTrap1142=sl81UserTrap1142, serialPorts=serialPorts, sl81UserTrap1101=sl81UserTrap1101, status=status, sl81UserTrap1013=sl81UserTrap1013, deStatusCounter=deStatusCounter, esNumberTempSensors=esNumberTempSensors, pagerPhoneNumber=pagerPhoneNumber, sl81UserTrap1137=sl81UserTrap1137, deFieldStart=deFieldStart, trapIncludedString=trapIncludedString, esNumberEventSensors=esNumberEventSensors, esPointValueInt=esPointValueInt, modemUserSetup=modemUserSetup, sl81UserTrap1145=sl81UserTrap1145, sl81UserTrap1095=sl81UserTrap1095, esPointInEventState=esPointInEventState, techSupport2n1=techSupport2n1, sl81UserTrap1085=sl81UserTrap1085, esRelayReportingMode=esRelayReportingMode, sl81UserTrap1154=sl81UserTrap1154, pagerTable=pagerTable, sl81UserTrap1168=sl81UserTrap1168, sl81UserTrap1090=sl81UserTrap1090, deConfigIndex=deConfigIndex, deStatusLastTriggerData=deStatusLastTriggerData, sl81UserTrap1001=sl81UserTrap1001, eventSensorBasics=eventSensorBasics, esNumberHumidSensors=esNumberHumidSensors, sl81UserTrap1016=sl81UserTrap1016, sl81UserTrap1135=sl81UserTrap1135, sl81UserTrap1094=sl81UserTrap1094, sl81UserTrap1180=sl81UserTrap1180, esEntry=esEntry, sl81UserTrap1155=sl81UserTrap1155, sl81UserTrap1195=sl81UserTrap1195, techSupport21Index=techSupport21Index, sl81UserTrap1072=sl81UserTrap1072, sl81UserTrap1034=sl81UserTrap1034, sl81UserTrap1020=sl81UserTrap1020, esTempReportingMode=esTempReportingMode, sl81UserTrap1076=sl81UserTrap1076, techSupport2=techSupport2, sl81UserTrap1183=sl81UserTrap1183, sl81UserTrap1022=sl81UserTrap1022, omnitronix=omnitronix, esPointTimeLastChange=esPointTimeLastChange)
|
nilq/baby-python
|
python
|
from allauth.account.forms import ChangePasswordForm as AllauthChangePasswordForm
class ChangePasswordForm(AllauthChangePasswordForm):
    """Allauth change-password form with the default placeholder attributes removed."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # allauth injects a placeholder into each password widget; strip them
        # so the rendered inputs rely on their labels alone.
        for field_name in ("oldpassword", "password1", "password2"):
            del self.fields[field_name].widget.attrs["placeholder"]
|
nilq/baby-python
|
python
|
"""Tests for the convention subsections."""
import re
import pytest
from tests.test_convention_doc import doctypes
@pytest.fixture(
    scope="module",
    params=[
        # One param per subsection across all sections; the subsection's own
        # identifier is used as the test ID for readable parametrized names.
        pytest.param((index, subsection), id=subsection.identifier)
        for section in doctypes.SECTIONS
        for index, subsection in enumerate(section.subsections)
    ],
)
def enumerated_subsections(request):
    """Parametrized fixture of each subsection along with its index in the parent section.

    Returns a ``(index, subsection)`` pair, where ``index`` is the
    subsection's zero-based position within its parent section.
    """
    return request.param
def test_subsection_identifier_valid(subsection):
"""Test that the section's identifier is a valid section identifier and matches expectations."""
assert re.match(r"[A-Z]+\.[1-9][0-9]*", subsection.identifier)
assert subsection.identifier.startswith(subsection.parent.identifier)
def test_subsection_identifiers_strictly_increasing(enumerated_subsections):
"""Test that the subsections in a section use strictly incrementing identifiers."""
index, subsection = enumerated_subsections
assert subsection.identifier.split(".")[-1] == str(index + 1)
def test_subsection_isnt_rule(subsection):
"""Test that we don't use subsections for rules."""
assert not (
subsection.header_text.startswith(" ✔️ **DO**")
or subsection.header_text.startswith(" ✔️ **CONSIDER**")
or subsection.header_text.startswith(" ❌ **AVOID**")
or subsection.header_text.startswith(" ❌ **DO NOT**")
)
def test_subsection_identifier_follows_case_convention(subsection):
"""Test that the subsection header starts with an uppercase letter."""
header_text = subsection.header_text.lstrip()
assert header_text[0].isupper(), "header should start with an uppercase letter"
|
nilq/baby-python
|
python
|
from PKG.models import ResnetV2
import tensorflow as tf
class Classifier(tf.keras.models.Model):
    """Image classifier: a ResnetV2 backbone followed by a dense softmax head.

    NOTE(review): the `weights` constructor argument is accepted but never
    used — confirm whether pretrained-weight loading was intended.
    NOTE(review): dropout is applied AFTER the softmax activation in `call`,
    which is unusual (dropout typically precedes the classification head) —
    confirm this ordering is intentional before changing it.
    """
    def __init__(self, nclasses, weights=None):
        # nclasses: number of output classes for the dense head.
        # weights: currently unused (see class NOTE above).
        super(Classifier, self).__init__()
        self.nn = ResnetV2()
        self.classifier = tf.keras.layers.Dense(nclasses)
        self.activation = tf.keras.layers.Activation("softmax")
        self.dropout = tf.keras.layers.Dropout(0.3)
    def call(self, X, training=False):
        """Forward pass: backbone -> dense -> softmax -> dropout (train only)."""
        Y = self.nn(X)
        Y = self.classifier(Y)
        Y = self.activation(Y)
        # Dropout is only active when training=True (Keras semantics).
        Y = self.dropout(Y, training=training)
        return Y
|
nilq/baby-python
|
python
|
from helpers.observerpattern import Observable
from HTMLParser import HTMLParser
class DomBuilder(Observable, HTMLParser):
    """
    Incrementally parses plain HTML (fed through HTMLParser) and records each
    element into an externally supplied DOM container. The DOM structure is
    injected at construction time, keeping this class decoupled from any
    concrete DOM implementation. Fires a "ParsingFinished" event once the
    parent stack unwinds back to its sentinel.
    """
    # Void elements never receive a closing tag
    # ( https://www.w3.org/TR/html51/syntax.html#void-elements )
    voidTags = ["area", "base", "br", "col", "embed", "hr", "img", "input", "keygen",
                "link", "menuitem", "meta", "param", "source", "track", "wbr"]  # const

    def __init__(self, dom):
        HTMLParser.__init__(self)
        self.dom = dom
        # Stack of open-element indices; None is the sentinel for "document root".
        self.actualParent = [None,]

    def handle_starttag(self, tag, attrs):
        # Store the element with a back-reference to its parent's node index.
        parent_index = self.actualParent[-1]
        node_index = self.dom.addNode((tag, attrs, parent_index))
        # Void elements cannot contain children, so they never become parents.
        if tag not in self.voidTags:
            self.actualParent.append(node_index)

    def handle_endtag(self, tag):
        if tag in self.voidTags:
            return  # We already did the job
        closed_index = self.actualParent.pop()
        if self.dom.getNode(closed_index)[0] != tag:
            # TODO: raise a dedicated parse-error type instead of bare Exception.
            raise Exception("DomBuilder - Closing tag is missing")
        # Back at the root sentinel: the document is fully parsed.
        if self.actualParent[-1] == None:
            self._finishParsing()

    def _finishParsing(self):
        self._trigger("ParsingFinished", { 'dom': self.dom })
|
nilq/baby-python
|
python
|
class Eventseverity(basestring):
    """
    String type for an event's severity level (syslog-style).

    Accepted values:
      emergency     - System is unusable
      alert         - Action must be taken immediately
      critical      - Critical condition
      error         - Error condition
      warning       - Warning condition
      notice        - Normal but significant condition
      informational - Information message
      debug         - A debugging message
    """
    @staticmethod
    def get_api_name():
        # Field name used for this type in the API schema.
        api_name = "eventseverity"
        return api_name
|
nilq/baby-python
|
python
|
from .attachment import Attachment
from .integration import Integration
from .message import Message
from .field import Field
|
nilq/baby-python
|
python
|
""" 152. Maximum Product Subarray
Given an integer array nums, find the contiguous subarray within an array (containing at least one number) which has the largest product.
Example 1:
Input: [2,3,-2,4]
Output: 6
Explanation: [2,3] has the largest product 6.
Example 2:
Input: [-2,0,-1]
Output: 0
Explanation: The result cannot be 2, because [-2,-1] is not a subarray.
"""
class Solution:
    """LeetCode 152 - Maximum Product Subarray.

    Fixes over the previous version:
    - `List` was used in annotations without any import, raising NameError
      the moment the class body executed; string annotations avoid that
      without changing the visible signature.
    - products were accumulated with float `/=` division, which silently
      loses precision once products exceed 2**53; all arithmetic is now
      exact integer multiplication.
    - `Solution.nonNegProd(self, ...)` calls replaced by `self.nonNegProd(...)`.
    """

    def maxProduct(self, nums: "List[int]") -> int:
        """Return the largest product over all contiguous subarrays of nums.

        Empty input yields 0, matching the previous behavior.
        """
        if len(nums) == 0:
            return 0
        if len(nums) == 1:
            return nums[0]
        # A maximal-product subarray never spans a zero, so split on zeros
        # and solve each zero-free segment; 0 itself is always a candidate
        # (it is achievable whenever a zero or an empty segment exists).
        candidates = [0]
        segment_start = 0
        for i, value in enumerate(nums):
            if value == 0:
                candidates.append(self.nonNegProd(nums[segment_start:i]))
                segment_start = i + 1
        candidates.append(self.nonNegProd(nums[segment_start:]))
        return max(candidates)

    def nonNegProd(self, nums: "List[int]") -> int:
        """Max product of a contiguous subarray of a zero-free list.

        Returns 0 for an empty list and nums[0] for a single element.
        With an even number of negatives the whole product is optimal;
        with an odd number, drop either the prefix ending at the first
        negative or the suffix starting at the last negative.
        """
        if len(nums) == 0:
            return 0
        if len(nums) == 1:
            return nums[0]
        negatives = [i for i, value in enumerate(nums) if value < 0]
        if len(negatives) % 2 == 0:
            total = 1
            for value in nums:
                total *= value
            return total
        first, last = negatives[0], negatives[-1]
        # prefix_prod: everything strictly before the last negative.
        prefix_prod = 1
        for value in nums[:last]:
            prefix_prod *= value
        # suffix_prod: everything strictly after the first negative.
        suffix_prod = 1
        for value in nums[first + 1:]:
            suffix_prod *= value
        return max(prefix_prod, suffix_prod)
|
nilq/baby-python
|
python
|
import numpy as np
import math
import matplotlib.pyplot as plt
from bandit import Bandit
from explore_then_exploit_agent import ExploreThenExploit
from MC_simulator import *
from epsilon_greedy_agent import EpsilonGreedy
from ubc1_agent import UBC1Agent
from report import plot
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# import the necessary packages
from tnmlearn.nn.conv import MiniVGGNet
from keras.optimizers import SGD
from tnmlearn.examples import BaseLearningModel
from tnmlearn.datasets import load_cifar10
# %%
class MiniVggNetCifar10(BaseLearningModel):
    """Trains and evaluates a MiniVGGNet on the CIFAR-10 dataset."""

    def __init__(self):
        super(MiniVggNetCifar10, self).__init__()

    def getData(self):
        """Load CIFAR-10 train/test splits and class names onto the instance."""
        train_split, test_split, class_names = load_cifar10()
        self.trainX, self.trainY = train_split
        self.testX, self.testY = test_split
        self.classNames = class_names

    def build(self):
        """Compile MiniVGGNet with SGD (decaying learning rate + Nesterov)."""
        # initialize the optimizer and model
        print("[INFO] compiling model...")
        optimizer = SGD(lr=0.01, decay=0.01 / 40, momentum=0.9, nesterov=True)
        network = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
        network.compile(loss="categorical_crossentropy", optimizer=optimizer,
                        metrics=["accuracy"])
        self.model = network

    def fit(self):
        # 40 epochs, batch size 64 (matching the decay schedule above).
        return self.fit_(40, 64)

    def evaluate(self):
        # Evaluate with the same batch size used for training.
        self.evaluate_(64)
|
nilq/baby-python
|
python
|
from app.order.domain.order import Order
from app.order.domain.order_repository import OrderRepository
class SqlOrderRepository(OrderRepository):
    """SQLAlchemy-session-backed implementation of OrderRepository."""

    def __init__(self, session):
        # session: SQLAlchemy session used for all persistence operations.
        self.session = session

    def save(self, order: Order):
        """Persist the order and commit immediately."""
        self.session.add(order)
        self.session.commit()

    def find_all(self):
        """Return every persisted Order."""
        query = self.session.query(Order)
        return query.all()

    def find_by_id(self, id: int):
        """Return the Order with the given id, or None when absent."""
        query = self.session.query(Order).filter(Order.id == id)
        return query.first()
|
nilq/baby-python
|
python
|
import os
import numpy as np
from copy import copy
from easyric.io import pix4d, geotiff, shp, plot
from easyric.calculate import geo2raw, geo2tiff, raw2raw
####################
# Software wrapper #
####################
class Pix4D:
def __init__(self, project_path, raw_img_path=None, project_name=None,
param_folder=None, dom_path=None, dsm_path=None, ply_path=None):
######################
# Project Attributes #
######################
self.project_path = self._get_full_path(project_path)
sub_folder = os.listdir(self.project_path)
if project_name is None:
self.project_name = os.path.basename(self.project_path)
else:
self.project_name = project_name
if raw_img_path is not None:
self.raw_img_path = os.path.normpath(raw_img_path)
else:
self.raw_img_path = None
#################
# Project Files #
#################
self.xyz_file = None
self.pmat_file = None
self.cicp_file = None
self.ccp_file = None
self.campos_file = None
self.ply_file = None
self.dom_file = None
self.dsm_file = None
self.dom_header = None
self.dsm_header = None
if '1_initial' in sub_folder:
self._original_specify()
else:
if param_folder is None:
raise FileNotFoundError(f'[Wrapper][Pix4D] Current folder |{self.project_path}| is not a standard '
f'pix4d default projects, "1_initial" folder not found and `param_folder` not specified')
else:
self._manual_specify(param_folder, dom_path, dsm_path, ply_path)
if self.dom_file is not None:
self.dom_header = geotiff.get_header(self.dom_file)
if self.dsm_file is not None:
self.dsm_header = geotiff.get_header(self.dsm_file)
###############
# Init Params #
###############
# --------------------
# >>> p4d.offset.x
# 368109.00
# >>> p4d.Py
# 3.9716578516421746
# >>> p4d.img[0].name
# ''DJI_0172.JPG''
# >>> p4d.img['DJI_0172.JPG']
# <class Image>
# >>> p4d.img[0].pmat
# pmat_ndarray
# --------------------
self.offset = None
self.img = None
# from cicp file
self.F = None
self.Px = None
self.Py = None
self.K1 = None
self.K2 = None
self.K3 = None
self.T1 = None
self.T2 = None
self.offset = OffSet(self._get_offsets())
self.img_pos = self._get_campos_df()
vars(self).update(self._get_cicp_dict())
self.img = ImageSet(img_path=self.raw_img_path,
pmat_dict=self._get_pmat_dict(),
ccp_dict=self._get_ccp_dict(),
img_pos=self.img_pos)
def _original_specify(self):
sub_folder = os.listdir(self.project_path)
self.xyz_file = f"{self.project_path}/1_initial/params/{self.project_name}_offset.xyz"
self.pmat_file = f"{self.project_path}/1_initial/params/{self.project_name}_pmatrix.txt"
self.cicp_file = f"{self.project_path}/1_initial/params/{self.project_name}_pix4d_calibrated_internal_camera_parameters.cam"
self.ccp_file = f"{self.project_path}/1_initial/params/{self.project_name}_calibrated_camera_parameters.txt"
self.campos_file = f"{self.project_path}/1_initial/params/{self.project_name}_calibrated_images_position.txt"
if self.raw_img_path is None:
undistorted_path = f"{self.project_path}/1_initial/images/undistorted_images"
if os.path.exists(undistorted_path):
self.raw_img_path = undistorted_path
else:
raise FileNotFoundError("raw image file not given, and could not find undistorted images outputs in Pix4D project")
self.ply_file = None
if '2_densification' in sub_folder:
dens_folder = f"{self.project_path}/2_densification/point_cloud"
self.ply_file = self._check_end(dens_folder, '.ply')
self.dom_file = None
self.dsm_file = None
if '3_dsm_ortho' in sub_folder:
dsm_folder = f"{self.project_path}/3_dsm_ortho/1_dsm"
dom_folder = f"{self.project_path}/3_dsm_ortho/2_mosaic"
self.dsm_file = self._check_end(dsm_folder, '.tif')
self.dom_file = self._check_end(dom_folder, '.tif')
def _manual_specify(self, param_folder, dom_path=None, dsm_path=None, ply_path=None):
self.xyz_file = self._get_full_path(f"{param_folder}/{self.project_name}_offset.xyz")
self.pmat_file = self._get_full_path(f"{param_folder}/{self.project_name}_pmatrix.txt")
self.cicp_file = self._get_full_path(f"{param_folder}/{self.project_name}_pix4d_calibrated_internal_camera_parameters.cam")
self.ccp_file = self._get_full_path(f"{param_folder}/{self.project_name}_calibrated_camera_parameters.txt")
self.campos_file = self._get_full_path(f"{param_folder}/{self.project_name}_calibrated_images_position.txt")
if ply_path is None:
try_ply = f"{self.project_name}_group1_densified_point_cloud.ply"
self.ply_file = self._get_full_path(f"{self.project_path}/{try_ply}")
if self.ply_file is not None:
print(f"[Init][Pix4D] No ply given, however find '{try_ply}' at current project folder")
else:
self.ply_file = self._get_full_path(ply_path)
if dom_path is None:
try_dom = f"{self.project_name}_transparent_mosaic_group1.tif"
self.dom_file = self._get_full_path(f"{self.project_path}/{try_dom}")
if self.dom_file is not None:
print(f"[Init][Pix4D] No dom given, however find '{try_dom}' at current project folder")
else:
self.dom_file = self._get_full_path(dom_path)
if dsm_path is None:
try_dsm = f"{self.project_name}_dsm.tif"
self.dsm_file = self._get_full_path(f"{self.project_path}/{try_dsm}")
if self.dsm_file is not None:
print(f"[Init][Pix4D] No dsm given, however find '{try_dsm}' at current project folder")
else:
self.dsm_file = self._get_full_path(dsm_path)
@staticmethod
def _check_end(folder, ext):
find_path = None
if os.path.exists(folder):
# find the first ply file as output (may cause problem)
for file in os.listdir(folder):
if file.endswith(ext):
find_path = f"{folder}/{file}"
break
return find_path
@staticmethod
def _get_full_path(short_path):
if isinstance(short_path, str):
return os.path.abspath(os.path.normpath(short_path)).replace('\\', '/')
else:
return None
    def _get_offsets(self):
        # Project-local coordinate offsets, parsed from the *_offset.xyz file.
        return pix4d.read_xyz(self.xyz_file)
    def _get_pmat_dict(self):
        # Per-image projection matrices, parsed from *_pmatrix.txt.
        return pix4d.read_pmat(self.pmat_file)
    def _get_ccp_dict(self):
        # Calibrated (external) camera parameters per image, from
        # *_calibrated_camera_parameters.txt.
        return pix4d.read_ccp(self.ccp_file)
    def _get_cicp_dict(self):
        # Calibrated internal camera parameters, from
        # *_pix4d_calibrated_internal_camera_parameters.cam.
        return pix4d.read_cicp(self.cicp_file)
    def _get_campos_df(self):
        # Calibrated camera positions table, from
        # *_calibrated_images_position.txt.
        return pix4d.read_cam_pos(self.campos_file)
#################
# Easy use apis #
#################
# ======== io.shp =========
    def read_shp2d(self, shp_path, shp_proj=None, geotiff_proj=None):
        """Read 2D shapes from a shapefile, reprojecting to a target CRS.

        geotiff_proj=None (default) targets this project's DSM projection;
        the special string 'Null' disables reprojection entirely; any other
        value is passed through as the target projection.
        """
        if geotiff_proj is None:
            proj = self.dsm_header['proj']
        elif geotiff_proj == 'Null':  # special sentinel: perform no transform
            proj = None
        else:
            proj = geotiff_proj
        shp_dict = shp.read_shp2d(shp_path, shp_proj=shp_proj, geotiff_proj=proj)
        return shp_dict
    def read_shp3d(self, shp_path, get_z_by='mean', get_z_buffer=0, shp_proj=None, geotiff_proj=None):
        """Read shapes and attach Z values sampled from this project's DSM.

        get_z_by / get_z_buffer are forwarded to io.shp.read_shp3d
        (presumably the height-aggregation statistic and a sampling buffer
        around each shape -- confirm in the io.shp module).
        """
        shp_dict = shp.read_shp3d(shp_path, self.dsm_file, get_z_by, get_z_buffer, shp_proj, geotiff_proj, geo_head=self.dsm_header)
        return shp_dict
# ======== io.geotiff =========
# ======== io.plot =========
# ======== calculate.geo2raw =========
# ======== calculate.geo2tiff =========
# ======== calculate.raw2raw =========
class PhotoScan:
    # Placeholder: Agisoft PhotoScan project support not implemented yet.
    pass
class OpenSfM:
    # Placeholder: OpenSfM project support not implemented yet.
    pass
#################
# Used Objects #
#################
class OffSet:
    """Hold a project's coordinate offsets as scalars (x, y, z) and as an array."""

    def __init__(self, offsets):
        # Keep the full sequence as a NumPy array for vectorized arithmetic
        # and expose the first three components individually.
        self.np = np.asarray(offsets)
        self.x, self.y, self.z = offsets[0], offsets[1], offsets[2]
class ImageSet:
    """Collection of raw images with their Pix4D external camera parameters."""

    def __init__(self, img_path, pmat_dict, ccp_dict, img_pos):
        # Only images that Pix4D actually calibrated appear in ccp_dict.
        used_by_pix4d = list(ccp_dict.keys())
        self.names = []
        self.img = []
        # Walk recursively: img_path may contain nested sub-folders.
        for folder, _sub_dirs, files in os.walk(img_path):
            for img_name in files:
                if img_name not in used_by_pix4d:
                    continue
                params = copy(ccp_dict[img_name])
                params['name'] = img_name
                params['pmat'] = pmat_dict[img_name]
                params['path'] = os.path.join(folder, img_name)
                params["cam_pos"] = img_pos.loc[img_name, :].values
                self.img.append(Image(**params))
                self.names.append(img_name)

    def __getitem__(self, key):
        if isinstance(key, int):  # index by position
            return self.img[key]
        elif isinstance(key, str):  # index by image file name
            return self.img[self.names.index(key)]
        elif isinstance(key, slice):
            return self.img[key]
        else:
            print(key)
            return None
class Image:
    """Single raw image plus its Pix4D camera parameters.

    Parameters
    ----------
    name : str
        Image file name (the key used in the Pix4D parameter files).
    path : str
        Full path to the image file on disk.
    w, h :
        Image width and height in pixels.
    pmat :
        Projection matrix of this image (from the Pix4D pmatrix file).
    cam_matrix :
        Internal camera matrix.
    rad_distort, tan_distort :
        Radial and tangential distortion coefficients.
    cam_pos :
        Calibrated camera position of this image.
    cam_rot :
        Calibrated camera rotation of this image.
    """

    def __init__(self, name, path, w, h, pmat, cam_matrix, rad_distort, tan_distort, cam_pos, cam_rot):
        # external parameters
        self.name = name
        self.path = path
        self.w = w
        self.h = h
        self.pmat = pmat
        self.cam_matrix = cam_matrix
        self.rad_distort = rad_distort
        self.tan_distort = tan_distort
        self.cam_pos = cam_pos
        self.cam_rot = cam_rot

    def __repr__(self):
        # Concise identification for debugging; the matrix/distortion
        # attributes are large, so only name and size are shown.
        return f"Image(name={self.name!r}, w={self.w}, h={self.h})"
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 8 14:38:17 2018
@author: jgoldstein
"""
# try to get some simulated PTA data like in Jeff Hazboun's github https://github.com/Hazboun6/pta_simulations
import numpy as np
import glob, os
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 2.5 * 72
import astropy
from astropy.time import Time
import enterprise
from enterprise.pulsar import Pulsar
import enterprise_extensions
from enterprise_extensions import models, model_utils
import libstempo as T2, libstempo.toasim as LT, libstempo.plot as LP
from ephem import Ecliptic, Equatorial
# can use LT.fakepulsar to create a fake libstempo tempopulsar
# LT.fakepulsar(parfile, obstimes, toaerr, [optional params])
# first need to get par files
# downloaded IPTA DR2 in /Documents/data/DR2
# maybe try to use .par files from DR2/release/VersionB/... for a bunch of pulsars?
SOURCE = '/home/jgoldstein/Documents/data/DR2/release/VersionB'
def fake_obs_times(source, cadence=20):
    """
    For all pulsars in source, generate some fake observation times

    Read START and FINISH from each pulsar's .par file, then pick random
    times with a given average cadence (in days).

    Parameters
    ----------
    source: str
        path to pulsars with .par files in 'pulsar'/'pulsar'.IPTADR2.par
    cadence: scalar
        default = 20
        average cadence (in days) for fake observations

    Returns
    -------
    list:
        pulsar names (sorted for reproducibility)
    list of NumPy arrays:
        observation times in MJD for each pulsar

    Raises
    ------
    ValueError
        if a .par file does not contain both START and FINISH entries
    """
    # Sort so pulsar order (and therefore any seeded RNG draws) is
    # reproducible -- os.listdir order is arbitrary.
    pulsars = sorted(os.listdir(source))
    observation_times = []
    for p in pulsars:
        parfile = os.path.join(source, p, '{}.IPTADR2.par'.format(p))
        # Parse both bounds without assuming FINISH appears after START;
        # the original early 'break' left 'start' unbound in that case.
        start = finish = None
        with open(parfile) as parf:
            for line in parf:
                if line.startswith('START'):
                    start = float(line.split()[1])
                elif line.startswith('FINISH'):
                    finish = float(line.split()[1])
                if start is not None and finish is not None:
                    break
        if start is None or finish is None:
            raise ValueError('START/FINISH not found in {}'.format(parfile))
        # pick n random observation times so that total time / n = cadence
        num_obs = int((finish - start) / cadence)
        # randint wants integer bounds; half-open interval [start, finish)
        obs = np.sort(np.random.randint(int(start), high=int(finish), size=num_obs))
        observation_times.append(obs)
    return pulsars, observation_times
def make_fake_pulsar(source, pulsar_name, obs_times, toa_err=1e-6):
    """
    Make an LT fakepulsar

    Parameters
    ----------
    source: str
        path to pulsars with .par files in 'pulsar'/'pulsar'.IPTADR2.par
    pulsar_name: str
        name of the pulsar (is also the directory with files in source)
    obs_times: array-like
        times of observation in MJD
    toa_err: float
        toa error in us
        NOTE(review): the default 1e-6 would be an extremely small error if
        the unit really is microseconds -- confirm against LT.fakepulsar.

    Returns
    -------
    LT.fakepulsar object
    """
    par_path = os.path.join(source, pulsar_name, pulsar_name+'.IPTADR2.par')
    return LT.fakepulsar(par_path, obs_times, toa_err)
|
nilq/baby-python
|
python
|
# Read N and print ceil(N / 100): the number of 100-unit blocks needed.
N = int(input())
# (N + 99) // 100 is integer ceiling division -- identical to branching on
# N % 100 for every integer N.
ans = (N + 99) // 100
print(ans)
|
nilq/baby-python
|
python
|
#_*_ coding: utf-8 -*-
{
'name': "Carlosma7",
'summary': """
This is the summary of the addon, second try.""",
'description': """
This is the description of the addon.
""",
'author': "Carlos Morales Aguilera",
'website': "http://www.carlosma7.com",
'category': 'Personal project',
'version':'0.1',
'application': True,
'depends': ['base','sale','mail'],
'data': [
'data/data.xml',
'security/ir.model.access.csv',
'views/patient.xml',
'views/kids.xml',
'views/patient_gender.xml',
'views/appointment.xml',
'views/sale.xml',
'views/doctor.xml',
'wizard/create_appointment.xml'],
'installable': True,
'auto_install': True,
}
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module BENU-HTTP-CLIENT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BENU-HTTP-CLIENT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:37:20 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): machine-generated PySNMP module -- regenerate with pysmi
# rather than editing by hand. It depends on names injected at load time by
# the pysnmp MIB builder (mibBuilder), so it is not importable standalone.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
benuWAG, = mibBuilder.importSymbols("BENU-WAG-MIB", "benuWAG")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Counter64, TimeTicks, Gauge32, iso, IpAddress, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter32, NotificationType, Integer32, ModuleIdentity, Bits, Unsigned32, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Counter64", "TimeTicks", "Gauge32", "iso", "IpAddress", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter32", "NotificationType", "Integer32", "ModuleIdentity", "Bits", "Unsigned32", "MibIdentifier")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity and metadata (loadTexts guards skip descriptions when the
# builder was compiled without texts).
benuHttpClientMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11))
benuHttpClientMIB.setRevisions(('2015-10-21 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: benuHttpClientMIB.setRevisionsDescriptions(('Initial Version',))
if mibBuilder.loadTexts: benuHttpClientMIB.setLastUpdated('201510210000Z')
if mibBuilder.loadTexts: benuHttpClientMIB.setOrganization('Benu Networks,Inc')
if mibBuilder.loadTexts: benuHttpClientMIB.setContactInfo('Benu Networks,Inc Corporate Headquarters 300 Concord Road, Suite 110 Billerica, MA 01821 USA Tel: +1 978-223-4700 Fax: +1 978-362-1908 Email: info@benunets.com')
if mibBuilder.loadTexts: benuHttpClientMIB.setDescription('This MIB module defines management information related to the HTTP client. Copyright (C) 2013 by Benu Networks, Inc. All rights reserved.')
bHttpClientObjects = ObjectIdentity((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1))
if mibBuilder.loadTexts: bHttpClientObjects.setStatus('current')
if mibBuilder.loadTexts: bHttpClientObjects.setDescription('HTTP client information is defined in this branch.')
# Latency table: one row per stats interval, indexed by
# bHttpClientLatencyStatsInterval.
bHttpClientLatencyTable = MibTable((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1), )
if mibBuilder.loadTexts: bHttpClientLatencyTable.setStatus('current')
if mibBuilder.loadTexts: bHttpClientLatencyTable.setDescription('Latency information list for HTTP client.')
bHttpClientLatencyEntry = MibTableRow((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1), ).setIndexNames((0, "BENU-HTTP-CLIENT-MIB", "bHttpClientLatencyStatsInterval"))
if mibBuilder.loadTexts: bHttpClientLatencyEntry.setStatus('current')
if mibBuilder.loadTexts: bHttpClientLatencyEntry.setDescription('A logical row in the bHttpClientLatencyTable.')
bHttpClientLatencyStatsInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 1), Integer32())
if mibBuilder.loadTexts: bHttpClientLatencyStatsInterval.setStatus('current')
if mibBuilder.loadTexts: bHttpClientLatencyStatsInterval.setDescription('The interval during which the measurements were accumulated. The interval index one indicates the latest interval for which statistics accumulation was completed. Older the statistics data, greater the interval index value. In a system supporting a history of n intervals with IntervalCount(1) and IntervalCount(n), the most and least recent intervals respectively, the following applies at the end of an interval: - discard the value of IntervalCount(n) - the value of IntervalCount(i) becomes that of IntervalCount(i+1) for 1 <= i < n - the value of IntervalCount(1) becomes that of CurrentCount.')
bHttpClientLatencyStatsIntervalDuration = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientLatencyStatsIntervalDuration.setStatus('current')
if mibBuilder.loadTexts: bHttpClientLatencyStatsIntervalDuration.setDescription('Http client latency stats interval duration.')
bHttpClientLatencyTotalPktCount = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 3), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientLatencyTotalPktCount.setStatus('current')
if mibBuilder.loadTexts: bHttpClientLatencyTotalPktCount.setDescription('The count of the total number of packets handled by http client.')
bHttpClientLatencyMaxProcessingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientLatencyMaxProcessingTime.setStatus('current')
if mibBuilder.loadTexts: bHttpClientLatencyMaxProcessingTime.setDescription('Maximum packet processing time handled by http client in micro seconds.')
bHttpClientLatencyMinProcessingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 5), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientLatencyMinProcessingTime.setStatus('current')
if mibBuilder.loadTexts: bHttpClientLatencyMinProcessingTime.setDescription('Minimum packet processing time handled by http client in micro seconds.')
bHttpClientLatencyAvgProcessingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientLatencyAvgProcessingTime.setStatus('current')
if mibBuilder.loadTexts: bHttpClientLatencyAvgProcessingTime.setDescription('Average packet processing time handled by http client in micro seconds.')
bHttpClientLatencyProcessTimeMorethan10MSPktCount = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientLatencyProcessTimeMorethan10MSPktCount.setStatus('current')
if mibBuilder.loadTexts: bHttpClientLatencyProcessTimeMorethan10MSPktCount.setDescription('Number of packets took more than 10 milli second processing time handled by http client.')
bHttpClientServReqLatencyTotalPktCount = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientServReqLatencyTotalPktCount.setStatus('current')
if mibBuilder.loadTexts: bHttpClientServReqLatencyTotalPktCount.setDescription('Total number of http server request packets handled by http client.')
bHttpClientServReqLatencyMaxProcessingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientServReqLatencyMaxProcessingTime.setStatus('current')
if mibBuilder.loadTexts: bHttpClientServReqLatencyMaxProcessingTime.setDescription('Http server request handled by http client maximum packet processing time in micro seconds.')
bHttpClientServReqLatencyMinProcessingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 10), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientServReqLatencyMinProcessingTime.setStatus('current')
if mibBuilder.loadTexts: bHttpClientServReqLatencyMinProcessingTime.setDescription('Http server request handled by http client minimum packet processing time in micro seconds.')
bHttpClientServReqLatencyAvgProcessingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientServReqLatencyAvgProcessingTime.setStatus('current')
if mibBuilder.loadTexts: bHttpClientServReqLatencyAvgProcessingTime.setDescription('Http server request handled by http client average packet processing time in micro seconds.')
bHttpClientServReqLatencyProcessTimeMorethan10MSPktCount = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientServReqLatencyProcessTimeMorethan10MSPktCount.setStatus('current')
if mibBuilder.loadTexts: bHttpClientServReqLatencyProcessTimeMorethan10MSPktCount.setDescription('Number of http server request packets handled by http client took more than 10 milli second processing time.')
bHttpClientJsonParsingLatencyTotalPktCount = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyTotalPktCount.setStatus('current')
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyTotalPktCount.setDescription('Total number of packets handled by http client - JSON parsing.')
bHttpClientJsonParsingLatencyMaxProcessingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 14), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyMaxProcessingTime.setStatus('current')
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyMaxProcessingTime.setDescription('Maximum packet processing time for JSON parsing handled by httpclient in micro seconds.')
bHttpClientJsonParsingLatencyMinProcessingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 15), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyMinProcessingTime.setStatus('current')
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyMinProcessingTime.setDescription('Minimum packet processing time for JSON parsing handled by httpclient in micro seconds.')
bHttpClientJsonParsingLatencyAvgProcessingTime = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 16), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyAvgProcessingTime.setStatus('current')
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyAvgProcessingTime.setDescription('Average packet processing time for JSON parsing handled by httpclient in micro seconds.')
bHttpClientJsonParsingLatencyProcessTimeMorethan10MS = MibTableColumn((1, 3, 6, 1, 4, 1, 39406, 2, 1, 11, 1, 1, 1, 17), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyProcessTimeMorethan10MS.setStatus('current')
if mibBuilder.loadTexts: bHttpClientJsonParsingLatencyProcessTimeMorethan10MS.setDescription('Number of packets handled by http client for JSON parsing took more than 10 milli second processing time.')
# Export every defined symbol so other compiled MIB modules can import them.
mibBuilder.exportSymbols("BENU-HTTP-CLIENT-MIB", bHttpClientJsonParsingLatencyAvgProcessingTime=bHttpClientJsonParsingLatencyAvgProcessingTime, bHttpClientLatencyProcessTimeMorethan10MSPktCount=bHttpClientLatencyProcessTimeMorethan10MSPktCount, bHttpClientServReqLatencyMinProcessingTime=bHttpClientServReqLatencyMinProcessingTime, bHttpClientJsonParsingLatencyMaxProcessingTime=bHttpClientJsonParsingLatencyMaxProcessingTime, PYSNMP_MODULE_ID=benuHttpClientMIB, bHttpClientObjects=bHttpClientObjects, benuHttpClientMIB=benuHttpClientMIB, bHttpClientLatencyTable=bHttpClientLatencyTable, bHttpClientLatencyMaxProcessingTime=bHttpClientLatencyMaxProcessingTime, bHttpClientLatencyAvgProcessingTime=bHttpClientLatencyAvgProcessingTime, bHttpClientJsonParsingLatencyMinProcessingTime=bHttpClientJsonParsingLatencyMinProcessingTime, bHttpClientServReqLatencyMaxProcessingTime=bHttpClientServReqLatencyMaxProcessingTime, bHttpClientServReqLatencyProcessTimeMorethan10MSPktCount=bHttpClientServReqLatencyProcessTimeMorethan10MSPktCount, bHttpClientJsonParsingLatencyProcessTimeMorethan10MS=bHttpClientJsonParsingLatencyProcessTimeMorethan10MS, bHttpClientLatencyStatsInterval=bHttpClientLatencyStatsInterval, bHttpClientLatencyStatsIntervalDuration=bHttpClientLatencyStatsIntervalDuration, bHttpClientJsonParsingLatencyTotalPktCount=bHttpClientJsonParsingLatencyTotalPktCount, bHttpClientServReqLatencyAvgProcessingTime=bHttpClientServReqLatencyAvgProcessingTime, bHttpClientLatencyEntry=bHttpClientLatencyEntry, bHttpClientLatencyMinProcessingTime=bHttpClientLatencyMinProcessingTime, bHttpClientServReqLatencyTotalPktCount=bHttpClientServReqLatencyTotalPktCount, bHttpClientLatencyTotalPktCount=bHttpClientLatencyTotalPktCount)
|
nilq/baby-python
|
python
|
"""
.. module:: __init__
:synopsis: This is where all our global variables and instantiation
happens. If there is simple app setup to do, it can be done here, but
more complex work should be farmed off elsewhere, in order to keep
this file readable.
.. moduleauthor:: Dan Schlosser <dan@schlosser.io>
"""
import json
import logging
from flask import Flask
from flask.ext.mongoengine import MongoEngine
from flask.ext.assets import Environment, Bundle
db = MongoEngine()
app = None
adi = dict()
assets = None
gcal_client = None
def create_app(**config_overrides):
    """This is normal setup code for a Flask app, but we give the option
    to provide override configurations so that in testing, a different
    database can be used.

    :param config_overrides: key/value pairs applied on top of
        ``config.flask_config`` (used by tests to point at another database).
    :returns: the configured Flask application.
    """
    from app.routes.base import register_error_handlers
    # we want to modify the global app, not a local copy
    global app
    global adi
    global assets
    global gcal_client
    app = Flask(__name__)
    # Load config then apply overrides
    app.config.from_object('config.flask_config')
    app.config.update(config_overrides)
    # Initialize assets
    assets = Environment(app)
    register_scss()
    # Setup the database.
    db.init_app(app)
    # Attach Blueprints (routing) to the app
    register_blueprints(app)
    # Attach error handling functions to the app
    register_error_handlers(app)
    # Register the logger.
    register_logger(app)
    return app
def register_logger(app):
    """Create an error logger and attach it to ``app``.

    Attaches a size-rotating file handler to both the werkzeug access
    logger and the application logger, using the file names and maximum
    size (in MB) configured on ``app.config``.
    """
    # ``logging.handlers`` is a submodule: it is NOT guaranteed to be an
    # attribute of ``logging`` unless explicitly imported, so import it here
    # rather than relying on another module having done so.
    import logging.handlers
    max_bytes = int(app.config["LOG_FILE_MAX_SIZE"]) * 1024 * 1024  # MB to B
    # Use "# noqa" to silence flake8 warnings for creating a variable that is
    # uppercase. (Here, we make a class, so uppercase is correct.)
    Handler = logging.handlers.RotatingFileHandler  # noqa
    f_str = ('%(levelname)s @ %(asctime)s @ %(filename)s '
             '%(funcName)s %(lineno)d: %(message)s')
    access_handler = Handler(app.config["WERKZEUG_LOG_NAME"],
                             maxBytes=max_bytes)
    access_handler.setLevel(logging.INFO)
    logging.getLogger("werkzeug").addHandler(access_handler)
    # The app log gets a formatter; the access log keeps werkzeug's format.
    app_handler = Handler(app.config["APP_LOG_NAME"], maxBytes=max_bytes)
    formatter = logging.Formatter(f_str)
    app_handler.setLevel(logging.INFO)
    app_handler.setFormatter(formatter)
    app.logger.addHandler(app_handler)
def register_blueprints(app):
    """Registers all the Blueprints (modules) in a function, to avoid
    circular dependencies.

    Be careful rearranging the order of the app.register_blueprint()
    calls, as it can also result in circular dependencies.
    """
    # Imported here (not at module top) to avoid an import cycle with app.
    from app.routes import client
    app.register_blueprint(client)
def register_scss():
    """Registers the Flask-Assets rules for scss compilation. This reads from
    ``config/scss.json`` to make these rules.
    """
    assets.url = app.static_url_path
    with open(app.config['SCSS_CONFIG_FILE']) as f:
        # json.load reads straight from the file object.
        bundle_set = json.load(f)
    output_folder = bundle_set['output_folder']
    depends = bundle_set['depends']
    # .items() (rather than the Python-2-only .iteritems()) keeps this
    # compatible with both Python 2 and 3; iteration behavior is identical.
    for bundle_name, instructions in bundle_set['rules'].items():
        bundle = Bundle(*instructions['inputs'],
                        output=output_folder + instructions['output'],
                        depends=depends,
                        filters='scss')
        assets.register(bundle_name, bundle)
def run():
    """Runs the app."""
    # HOST/PORT come from the active config; .get() leaves them None
    # (Flask's defaults) when unset.
    app.run(host=app.config.get('HOST'), port=app.config.get('PORT'))
|
nilq/baby-python
|
python
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
This module includes a collection of testing functions for the QuTiP scattering
module. Tests are approximate with low resolution to minimize runtime.
"""
# Author: Ben Bartlett
# Contact: benbartlett@stanford.edu
import numpy as np
from numpy.testing import assert_, run_module_suite
from qutip.operators import create, destroy
from qutip.states import basis
from qutip.scattering import *
class TestScattering:
    """
    A test class for the QuTiP quantum optical scattering module. These tests
    only use the two-level system for comparison, since larger systems can
    take a long time to run.
    """
    def testScatteringProbability(self):
        """
        Asserts that pi pulse in TLS has P0 ~ 0 and P0+P1+P2 ~ 1
        """
        w0 = 1.0 * 2 * np.pi
        gamma = 1.0
        sm = np.sqrt(gamma) * destroy(2)
        pulseArea = np.pi
        pulseLength = 0.2 / gamma
        RabiFreq = pulseArea / (2 * pulseLength)
        psi0 = basis(2, 0)
        # Log-spaced time grid shifted to start at t = 0.
        tlist = np.geomspace(gamma, 10 * gamma, 40) - gamma
        # Define TLS Hamiltonian
        # Drive terms are gated by the (t < pulseLength) factor.
        H0S = w0 * create(2) * destroy(2)
        H1S1 = lambda t, args: \
            RabiFreq * 1j * np.exp(-1j * w0 * t) * (t < pulseLength)
        H1S2 = lambda t, args: \
            RabiFreq * -1j * np.exp(1j * w0 * t) * (t < pulseLength)
        Htls = [H0S, [sm.dag(), H1S1], [sm, H1S2]]
        # Run the test
        P0 = scattering_probability(Htls, psi0, 0, [sm], tlist)
        P1 = scattering_probability(Htls, psi0, 1, [sm], tlist)
        P2 = scattering_probability(Htls, psi0, 2, [sm], tlist)
        assert_(P0 < 1e-3)
        assert_(np.abs(P0 + P1 + P2 - 1) < 1e-3)
    def testScatteringAmplitude(self):
        """
        Asserts that a 2pi pulse in TLS has ~0 amplitude after pulse
        """
        w0 = 1.0 * 2 * np.pi
        gamma = 1.0
        sm = np.sqrt(gamma) * destroy(2)
        pulseArea = 2 * np.pi
        pulseLength = 0.2 / gamma
        RabiFreq = pulseArea / (2 * pulseLength)
        psi0 = basis(2, 0)
        T = 50
        tlist = np.linspace(0, 1 / gamma, T)
        # Define TLS Hamiltonian
        H0S = w0 * create(2) * destroy(2)
        H1S1 = lambda t, args: \
            RabiFreq * 1j * np.exp(-1j * w0 * t) * (t < pulseLength)
        H1S2 = lambda t, args: \
            RabiFreq * -1j * np.exp(1j * w0 * t) * (t < pulseLength)
        Htls = [H0S, [sm.dag(), H1S1], [sm, H1S2]]
        # Run the test
        # Amplitude of a single photon emitted into time bin 40 (after the
        # pulse has ended) should vanish for a 2-pi pulse.
        state = temporal_scattered_state(Htls, psi0, 1, [sm], tlist)
        basisVec = temporal_basis_vector([[40]], T)
        amplitude = np.abs((basisVec.dag() * state).full().item())
        assert_(amplitude < 1e-3)
    def testWaveguideSplit(self):
        """
        Checks that a trivial splitting of a waveguide collapse operator like
        [sm] -> [sm/sqrt2, sm/sqrt2] doesn't affect the normalization or result
        """
        gamma = 1.0
        sm = np.sqrt(gamma) * destroy(2)
        pulseArea = np.pi
        pulseLength = 0.2 / gamma
        RabiFreq = pulseArea / (2 * pulseLength)
        psi0 = basis(2, 0)
        tlist = np.geomspace(gamma, 10 * gamma, 40) - gamma
        # Define TLS Hamiltonian with rotating frame transformation
        Htls = [[sm.dag() + sm, lambda t, args: RabiFreq * (t < pulseLength)]]
        # Run the test
        c_ops = [sm]
        c_ops_split = [sm / np.sqrt(2), sm / np.sqrt(2)]
        P1 = scattering_probability(Htls, psi0, 1, c_ops, tlist)
        P1_split = scattering_probability(Htls, psi0, 1, c_ops_split, tlist)
        tolerance = 1e-7
        assert_(1 - tolerance < P1 / P1_split < 1 + tolerance)
# Allow running this test module directly via numpy.testing's runner.
if __name__ == "__main__":
    run_module_suite()
|
nilq/baby-python
|
python
|
""" Unit test for re module"""
import unittest
import re
class ReTests(unittest.TestCase):
    def test_findall(self):
        """Exercise re.findall across literals, character classes,
        quantifiers (greedy and lazy), repetition braces, groups,
        alternation, anchors, lookahead and escapes.

        The commented-out assertions are expected CPython results that
        Skulpt currently gets wrong; they are kept for reference.
        """
        #Skulpt is failing all the commented out tests in test_findall and it shouldn't be
        # --- plain literal patterns ---
        val = re.findall("From","dlkjdsljkdlkdsjlk")
        self.assertEqual(len(val), 0)
        val = re.findall("From","dlkjd From kdsjlk")
        self.assertEqual(len(val), 1)
        val = re.findall("From","From dlkjd From kdsjlk")
        self.assertEqual(len(val), 2)
        val = re.findall("[0-9]+/[0-9]+","1/2 1/3 3/4 1/8 fred 10/0")
        self.assertEqual(len(val), 5)
        # --- keyword arguments and IGNORECASE flag ---
        a = re.findall(string="A stitch in time saves nine.", flags=re.IGNORECASE, pattern="a")
        self.assertEqual(a, ['A', 'a'])
        a = re.findall("[a-z]*ei[a-z]*", "Is Dr. Greiner your friend, Julie?", re.IGNORECASE)
        self.assertEqual(a, ['Greiner'])
        # --- capturing vs non-capturing groups change findall's output ---
        b = re.findall("[a-z]*(ei|ie)[a-z]*", "Is Dr. Greiner your friend, Julie?", re.IGNORECASE)
        self.assertEqual(b, ['ei', 'ie', 'ie'])
        c = re.findall("[a-z]*(ei|ie)([a-z]*)", "Is Dr. Greiner your friend, Julie?", re.IGNORECASE)
        self.assertEqual(c, [('ei', 'ner'), ('ie', 'nd'), ('ie', '')])
        d = re.findall("[a-z]*(?:ei|ie)[a-z]*", "Is Dr. Greiner your friend, Julie?", re.IGNORECASE)
        self.assertEqual(d, ['Greiner', 'friend', 'Julie'])
        self.assertEqual(re.findall('\w+', "Words, words, words."), ['Words', 'words', 'words'])
        self.assertEqual(re.findall('(abc)(def)', 'abcdef'), [('abc', 'def')])
        self.assertEqual(re.findall('(abc)(def)', 'abcdefabcdefjaabcdef3sabc'), [('abc', 'def'), ('abc', 'def'), ('abc', 'def')])
        self.assertEqual(re.findall('(abc)', 'abcdef'), ['abc'])
        self.assertEqual(re.findall('(abc)|(def)', 'abcdefabcdefjaabcdef3sabc'), [('abc', ''), ('', 'def'), ('abc', ''), ('', 'def'), ('abc', ''), ('', 'def'), ('abc', '')])
        # --- empty matches and alternation with \s* ---
        self.assertEqual(re.findall("^\s*$", ""), [''])
        #self.assertEqual(re.findall("\s*|a", " a b"), [' ', '', 'a', ' ', '', ''])
        self.assertEqual(re.findall("a|\s*", " a b"), [' ', 'a', ' ', '', ''])
        #self.assertEqual(re.findall("\s*|a", " ba b"), [' ', '', '', 'a', ' ', '', ''])
        self.assertEqual(re.findall("a|\s*", " ba b"), [' ', '', 'a', ' ', '', ''])
        # --- dot (no DOTALL: '.' does not match newline) ---
        self.assertEqual(re.findall(".",""), [])
        self.assertEqual(re.findall(".","a"), ['a'])
        self.assertEqual(re.findall(".a","a"), [])
        self.assertEqual(re.findall("a","a"), ['a'])
        self.assertEqual(re.findall("a.","a\n"), [])
        self.assertEqual(re.findall(".a","ba"), ['ba'])
        # --- anchors ^ and $ ---
        self.assertEqual(re.findall("^",""), [''])
        self.assertEqual(re.findall("a^",""), [])
        self.assertEqual(re.findall("^a","ba"), [])
        self.assertEqual(re.findall("^a","ab"), ['a'])
        self.assertEqual(re.findall("^a","\na"), [])
        self.assertEqual(re.findall("a^","a"), [])
        self.assertEqual(re.findall("$",""), [''])
        self.assertEqual(re.findall("$a","a"), [])
        self.assertEqual(re.findall("a$","a"), ['a'])
        self.assertEqual(re.findall("a$","ab"), [])
        self.assertEqual(re.findall("a$","a\nb"), [])
        self.assertEqual(re.findall("a$","a\n"), ['a'])
        # --- greedy quantifiers * + ? ---
        self.assertEqual(re.findall("a*",""), [''])
        self.assertEqual(re.findall("ab*","a"), ['a'])
        self.assertEqual(re.findall("ab*","ab"), ['ab'])
        self.assertEqual(re.findall("ab*","abbbbb"), ['abbbbb'])
        self.assertEqual(re.findall("ab*","ba"), ['a'])
        self.assertEqual(re.findall("ab*","bbbb"), [])
        self.assertEqual(re.findall("a+",""), [])
        self.assertEqual(re.findall("ab+","a"), [])
        self.assertEqual(re.findall("ab+","ab"), ['ab'])
        self.assertEqual(re.findall("ab+","abbbbb"), ['abbbbb'])
        self.assertEqual(re.findall("ab+","ba"), [])
        self.assertEqual(re.findall("ab+","bbbb"), [])
        self.assertEqual(re.findall("a?",""), [''])
        self.assertEqual(re.findall("ab?","a"), ['a'])
        self.assertEqual(re.findall("ab?","ab"), ['ab'])
        self.assertEqual(re.findall("ab?","abbbbb"), ['ab'])
        self.assertEqual(re.findall("ab?","ba"), ['a'])
        self.assertEqual(re.findall("ab?","bbbb"), [])
        # --- lazy quantifiers *? +? ?? ---
        #self.assertEqual(re.findall("a*?","a"), ['', 'a', ''])
        self.assertEqual(re.findall("ab*?","abbbb"), ['a'])
        self.assertEqual(re.findall("ab*?","a"), ['a'])
        self.assertEqual(re.findall("ab*?",""), [])
        self.assertEqual(re.findall("a+?","a"), ['a'])
        self.assertEqual(re.findall("ab+?","abbbb"), ['ab'])
        self.assertEqual(re.findall("ab+?","a"), [])
        self.assertEqual(re.findall("ab+?",""), [])
        #self.assertEqual(re.findall("a??","a"), ['', 'a', ''])
        self.assertEqual(re.findall("ab??","abbbb"), ['a'])
        self.assertEqual(re.findall("ab??","a"), ['a'])
        self.assertEqual(re.findall("ab??",""), [])
        # --- repetition braces {m}, {m,n}, {,n}, {m,} (greedy and lazy) ---
        self.assertEqual(re.findall("a{2}","a"), [])
        self.assertEqual(re.findall("a{2}","aa"), ['aa'])
        self.assertEqual(re.findall("a{2}","aaa"), ['aa'])
        self.assertEqual(re.findall("a{1,2}b","b"), [])
        self.assertEqual(re.findall("a{1,2}b","ab"), ['ab'])
        self.assertEqual(re.findall("a{1,2}b","aab"), ['aab'])
        self.assertEqual(re.findall("a{1,2}b","aaab"), ['aab'])
        self.assertEqual(re.findall("a{,2}b","b"), ['b'])
        self.assertEqual(re.findall("a{,2}b","ab"), ['ab'])
        self.assertEqual(re.findall("a{,2}b","aab"), ['aab'])
        self.assertEqual(re.findall("a{,2}b","aaab"), ['aab'])
        self.assertEqual(re.findall("a{2,}b","b"), [])
        self.assertEqual(re.findall("a{2,}b","ab"), [])
        self.assertEqual(re.findall("a{2,}b","aab"), ['aab'])
        self.assertEqual(re.findall("a{2,}b","aaab"), ['aaab'])
        self.assertEqual(re.findall("a{3,5}","aaaaaaaaaa"), ['aaaaa', 'aaaaa'])
        self.assertEqual(re.findall("a{,5}","aaaaaaaaaa"), ['aaaaa', 'aaaaa', ''])
        self.assertEqual(re.findall("a{3,}","aaaaaaaaaa"), ['aaaaaaaaaa'])
        self.assertEqual(re.findall("a{1,2}?b","b"), [])
        self.assertEqual(re.findall("a{1,2}?b","ab"), ['ab'])
        self.assertEqual(re.findall("a{1,2}?b","aab"), ['aab'])
        self.assertEqual(re.findall("a{1,2}?b","aaab"), ['aab'])
        self.assertEqual(re.findall("a{,2}?b","b"), ['b'])
        self.assertEqual(re.findall("a{,2}?b","ab"), ['ab'])
        self.assertEqual(re.findall("a{,2}?b","aab"), ['aab'])
        self.assertEqual(re.findall("a{,2}?b","aaab"), ['aab'])
        self.assertEqual(re.findall("a{2,}?b","b"), [])
        self.assertEqual(re.findall("a{2,}?b","ab"), [])
        self.assertEqual(re.findall("a{2,}?b","aab"), ['aab'])
        self.assertEqual(re.findall("a{2,}?b","aaab"), ['aaab'])
        self.assertEqual(re.findall("a{3,5}?","aaaaaaaaaa"), ['aaa', 'aaa', 'aaa'])
        #self.assertEqual(re.findall("a{,5}?","aaaaaaaaaa"), ['', 'a', '', 'a', '', 'a', '', 'a', '', 'a', '', 'a', '', 'a', '', 'a', '', 'a', '', 'a', ''])
        self.assertEqual(re.findall("a{3,}?","aaaaaaaaaa"), ['aaa', 'aaa', 'aaa'])
        # --- character classes and negation ---
        self.assertEqual(re.findall("[a,b,c]","abc"), ['a', 'b', 'c'])
        self.assertEqual(re.findall("[a-z]","bc"), ['b', 'c'])
        self.assertEqual(re.findall("[A-Z,0-9]","abcdefg"), [])
        self.assertEqual(re.findall("[^A-Z]","ABCDEFGaHIJKL"), ['a'])
        self.assertEqual(re.findall("[a*bc]","*"), ['*'])
        # --- alternation, groups and lookahead ---
        self.assertEqual(re.findall("|",""), [''])
        self.assertEqual(re.findall("|a",""), [''])
        self.assertEqual(re.findall("a|b","ba"), ['b', 'a'])
        self.assertEqual(re.findall("h|ello","hello"), ['h', 'ello'])
        self.assertEqual(re.findall("(b*)","bbbba"), ['bbbb', '', ''])
        self.assertEqual(re.findall("(?:b*)","bbbba"), ['bbbb', '', ''])
        self.assertEqual(re.findall("a(?=b)","a"), [])
        self.assertEqual(re.findall("a(?=b)","ab"), ['a'])
        self.assertEqual(re.findall("a(?!b)","a"), ['a'])
        self.assertEqual(re.findall("a(?!b)","ab"), [])
        # --- escape sequences in raw-string patterns ---
        pattern = r"\n"
        self.assertEqual(re.findall(pattern, "\n"), ['\n'])
        self.assertEqual(re.findall(pattern, "\n\n"), ['\n', '\n'])
        self.assertEqual(re.findall(pattern, "x\nx"), ['\n'])
        self.assertEqual(re.findall(pattern, "x\nx\n"), ['\n', '\n'])
        pattern = r"\t"
        self.assertEqual(re.findall(pattern, "\t"), ['\t'])
        self.assertEqual(re.findall(pattern, "\t\t"), ['\t', '\t'])
        self.assertEqual(re.findall(pattern, "x\tx"), ['\t'])
        self.assertEqual(re.findall(pattern, "x\tx\t"), ['\t', '\t'])
        # issue1148
        self.assertEqual(re.findall(r"[^c|p]at", r"mat cat hat pat"), ['mat', 'hat'])
def test_search(self):
    """Exercise re.search across anchors, quantifiers, classes, alternation
    and lookarounds, checking both matched text and no-match cases."""
    val = re.search("From","dlkjdsljkdlkdsjlk")
    self.assertEqual(val, None)
    val = re.search("From","dlkjd From kdsjlk")
    self.assertTrue(val is not None)
    val = re.search("From","From dlkjd From kdsjlk")
    self.assertTrue(val is not None)
    def helper(match,expected):
        # expected is either a string (exact text of group 0) or a bool
        # (whether any match was found at all).
        if type(expected) == str:
            if match:
                if match.group(0)==expected: return True
                else: return False
            else: return False
        else:
            if match: return True == expected
            else: return False == expected
    # Anchors and the dot metacharacter
    self.assertTrue(helper(re.search(".",""),False))
    self.assertTrue(helper(re.search(".","a"),True))
    self.assertTrue(helper(re.search(".a","a"),False))
    self.assertTrue(helper(re.search("a","a"),True))
    self.assertTrue(helper(re.search("a.","a\n"),False))
    self.assertTrue(helper(re.search(".a","ba"),True))
    self.assertTrue(helper(re.search("^",""),True))
    self.assertTrue(helper(re.search("a^",""),False))
    self.assertTrue(helper(re.search("^a","ba"),False))
    self.assertTrue(helper(re.search("^a","ab"),True))
    self.assertTrue(helper(re.search("^a","\na"),False))
    self.assertTrue(helper(re.search("a^","a"),False))
    self.assertTrue(helper(re.search("$",""),True))
    self.assertTrue(helper(re.search("$a","a"),False))
    self.assertTrue(helper(re.search("a$","a"),True))
    self.assertTrue(helper(re.search("a$","ab"),False))
    self.assertTrue(helper(re.search("a$","a\nb"),False))
    self.assertTrue(helper(re.search("a$","a\n"),True))
    # Greedy quantifiers * + ?
    self.assertTrue(helper(re.search("a*",""),""))
    self.assertTrue(helper(re.search("ab*","a"),"a"))
    self.assertTrue(helper(re.search("ab*","ab"),"ab"))
    self.assertTrue(helper(re.search("ab*","abbbbb"),"abbbbb"))
    self.assertTrue(helper(re.search("ab*","ba"),"a"))
    self.assertTrue(helper(re.search("ab*","bbbb"),False))
    self.assertTrue(helper(re.search("a+",""),False))
    self.assertTrue(helper(re.search("ab+","a"),False))
    self.assertTrue(helper(re.search("ab+","ab"),"ab"))
    self.assertTrue(helper(re.search("ab+","abbbbb"),"abbbbb"))
    self.assertTrue(helper(re.search("ab+","ba"),False))
    self.assertTrue(helper(re.search("ab+","bbbb"),False))
    self.assertTrue(helper(re.search("a?",""),""))
    self.assertTrue(helper(re.search("ab?","a"),"a"))
    self.assertTrue(helper(re.search("ab?","ab"),"ab"))
    self.assertTrue(helper(re.search("ab?","abbbbb"),"ab"))
    self.assertTrue(helper(re.search("ab?","ba"),"a"))
    self.assertTrue(helper(re.search("ab?","bbbb"),False))
    # Non-greedy quantifiers *? +? ??
    self.assertTrue(helper(re.search("a*?","a"),""))
    self.assertTrue(helper(re.search("ab*?","abbbb"),"a"))
    self.assertTrue(helper(re.search("ab*?","a"),"a"))
    self.assertTrue(helper(re.search("ab*?",""),False))
    self.assertTrue(helper(re.search("a+?","a"),"a"))
    self.assertTrue(helper(re.search("ab+?","abbbb"),"ab"))
    self.assertTrue(helper(re.search("ab+?","a"),False))
    self.assertTrue(helper(re.search("ab+?",""),False))
    self.assertTrue(helper(re.search("a??","a"),""))
    self.assertTrue(helper(re.search("ab??","abbbb"),"a"))
    self.assertTrue(helper(re.search("ab??","a"),"a"))
    self.assertTrue(helper(re.search("ab??",""),False))
    # Counted repetition {m}, {m,n}, {,n}, {m,}
    self.assertTrue(helper(re.search("a{2}","a"),False))
    self.assertTrue(helper(re.search("a{2}","aa"),"aa"))
    self.assertTrue(helper(re.search("a{2}","aaa"),"aa"))
    self.assertTrue(helper(re.search("a{1,2}b","b"),False))
    self.assertTrue(helper(re.search("a{1,2}b","ab"),"ab"))
    self.assertTrue(helper(re.search("a{1,2}b","aab"),"aab"))
    self.assertTrue(helper(re.search("a{1,2}b","aaab"),"aab"))
    self.assertTrue(helper(re.search("a{,2}b","b"),"b"))
    self.assertTrue(helper(re.search("a{,2}b","ab"),"ab"))
    self.assertTrue(helper(re.search("a{,2}b","aab"),"aab"))
    self.assertTrue(helper(re.search("a{,2}b","aaab"),"aab"))
    self.assertTrue(helper(re.search("a{2,}b","b"),False))
    self.assertTrue(helper(re.search("a{2,}b","ab"),False))
    self.assertTrue(helper(re.search("a{2,}b","aab"),"aab"))
    self.assertTrue(helper(re.search("a{2,}b","aaab"),"aaab"))
    self.assertTrue(helper(re.search("a{3,5}","aaaaaaaaaa"),"aaaaa"))
    self.assertTrue(helper(re.search("a{,5}","aaaaaaaaaa"),"aaaaa"))
    self.assertTrue(helper(re.search("a{3,}","aaaaaaaaaa"),"aaaaaaaaaa"))
    # Character classes, alternation, lookarounds
    self.assertTrue(helper(re.search("[a,b,c]","abc"),"a"))
    self.assertTrue(helper(re.search("[a-z]","bc"),"b"))
    self.assertTrue(helper(re.search("[A-Z,0-9]","abcdefg"),False))
    self.assertTrue(helper(re.search("[^A-Z]","ABCDEFGaHIJKL"),"a"))
    self.assertTrue(helper(re.search("[a*bc]","*"),"*"))
    self.assertTrue(helper(re.search("|",""),""))
    self.assertTrue(helper(re.search("|a",""),""))
    self.assertTrue(helper(re.search("a|b","ba"),"b"))
    self.assertTrue(helper(re.search("h|ello","hello"),"h"))
    self.assertTrue(helper(re.search("(?:b*)","bbbba"),'bbbb'))
    self.assertTrue(helper(re.search("a(?=b)","a"),False))
    self.assertTrue(helper(re.search("a(?=b)","ab"),"a"))
    self.assertTrue(helper(re.search("a(?!b)","a"),"a"))
    self.assertTrue(helper(re.search("a(?!b)","ab"),False))
def test_match(self):
val = re.match("From","dlkjdsljkdlkdsjlk")
self.assertEqual(val, None)
val = re.match("From","dlkjd From kdsjlk")
self.assertTrue(val is None)
val = re.match("From","From dlkjd From kdsjlk")
self.assertTrue(val is not None)
def test_groups(self):
m = re.match('([0-9]+)([a-z]+)','345abu')
self.assertEqual(m.groups(), ('345', 'abu'))
self.assertEqual(m.group(0), "345abu")
self.assertEqual(m.group(1), "345")
self.assertEqual(m.group(2), "abu")
m = re.match('([0-9]+)([a-z]+)([A-Z]*)','345abu')
self.assertEqual(m.groups('default'), tuple(['345','abu','']))
def test_split(self):
a = re.split("a", "A stitch in time saves nine.", flags=re.IGNORECASE)
self.assertEqual(a, ['', ' stitch in time s', 'ves nine.'])
self.assertEqual(re.split("\W+", "Words, words, words."), ['Words', 'words', 'words', ''])
self.assertEqual(re.split("(\W+)", "Words, words, words."), ['Words', ', ', 'words', ', ', 'words', '.', ''])
self.assertEqual(re.split("\W+", "Words, words, words.", 1), ['Words', 'words, words.'])
self.assertEqual(re.split('[a-f]+', '0a3B9', 0, re.IGNORECASE), ['0', '3', '9'])
self.assertEqual(re.split("(\W+)", '...words, words...'), ['', '...', 'words', ', ', 'words', '...', ''])
#Skulpt fails the test below and it shouldn't
#self.assertEqual(re.split('x*', 'foo'), ['', 'f', 'o', 'o', ''])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
import os
from setuptools import setup
def package_files(directory):
    """Recursively collect every file under *directory*.

    Each entry is prefixed with '..' so the paths resolve relative to the
    package directory when used as setuptools package_data.
    """
    return [
        os.path.join('..', walk_root, fname)
        for walk_root, _subdirs, fnames in os.walk(directory)
        for fname in fnames
    ]
# Ship every file under the bundled hardware directory with the package;
# the '..'-prefixed paths are resolved relative to the package directory.
extra_files = package_files('sejits4fpgas/hw')
extra_files.append('*.config')
setup(
    name='Sejits4Fpgas',
    version='0.1',
    packages=['sejits4fpgas', 'sejits4fpgas.src'],
    package_dir={'sejits4fpgas': 'sejits4fpgas'},
    # Non-Python data files (HDL sources, configs) included in the wheel/sdist.
    package_data={
        'sejits4fpgas':extra_files
    },
    url='',
    license='',
    author='Philipp Ebensberger',
    author_email='contact@3bricks-software.de',
    description='',
    install_requires=["numpy", "scikit-image", "scipy", "pytest"],
)
|
nilq/baby-python
|
python
|
# ======================================================================================================================
# Fakulta informacnich technologii VUT v Brne
# Bachelor thesis
# Author: Filip Bali (xbalif00)
# License: MIT
# ======================================================================================================================
from django.contrib import admin
from .models import Profile
# Register your models here.
# Expose the Profile model in the Django admin interface with default options.
admin.site.register(Profile)
|
nilq/baby-python
|
python
|
import numpy as np
import math
def nanRound(vs, *args, **kw):
    """Round each element of *vs* with np.around, passing NaN through unchanged.

    Extra positional/keyword arguments are forwarded to np.around
    (e.g. ``decimals``). Returns a new list.
    """
    def _round_one(value):
        # np.around(nan) would return nan anyway, but isnan keeps the
        # original value object untouched.
        return value if math.isnan(value) else np.around(value, *args, **kw)
    return [_round_one(value) for value in vs]
|
nilq/baby-python
|
python
|
import os
import sys
import pprint
from PIL import Image
from av import open
video = open(sys.argv[1])
stream = next(s for s in video.streams if s.type == b'video')
# Save the first few decoded frames as JPEGs, then stop.
# BUG FIX: frame_count was never initialized, so the loop raised a NameError
# on the very first frame; it is now counted explicitly, and the outer demux
# loop also stops once enough frames have been written (previously only the
# inner loop broke, so demuxing continued over the whole file).
frame_count = 0
for packet in video.demux(stream):
    for frame in packet.decode():
        frame.to_image().save('sandbox/%04d.jpg' % frame.index)
        frame_count += 1
        if frame_count > 5:
            break
    if frame_count > 5:
        break
|
nilq/baby-python
|
python
|
from markdownify import markdownify as md, ATX, ATX_CLOSED, BACKSLASH, UNDERSCORE
import re
# HTML fixture: a three-level nested unordered list (used by the ul tests).
nested_uls = """
<ul>
<li>1
<ul>
<li>a
<ul>
<li>I</li>
<li>II</li>
<li>III</li>
</ul>
</li>
<li>b</li>
<li>c</li>
</ul>
</li>
<li>2</li>
<li>3</li>
</ul>"""
# HTML fixture: a three-level nested ordered list (used by the ol tests).
# BUG FIX: the outermost list was closed with a stray </ul> instead of </ol>,
# producing mismatched HTML. Lenient parsers auto-recover, so rendered output
# is unchanged, but the fixture is now well-formed.
nested_ols = """
<ol>
<li>1
<ol>
<li>a
<ol>
<li>I</li>
<li>II</li>
<li>III</li>
</ol>
</li>
<li>b</li>
<li>c</li>
</ol>
</li>
<li>2</li>
<li>3</li>
</ol>"""
# HTML fixture: simple table without thead/tbody; all whitespace is stripped
# so the markdown expectations are independent of source formatting.
table = re.sub(r'\s+', '', """
<table>
<tr>
<th>Firstname</th>
<th>Lastname</th>
<th>Age</th>
</tr>
<tr>
<td>Jill</td>
<td>Smith</td>
<td>50</td>
</tr>
<tr>
<td>Eve</td>
<td>Jackson</td>
<td>94</td>
</tr>
</table>
""")
# HTML fixture: same table content but wrapped in explicit thead/tbody;
# expected markdown output is identical to the plain `table` fixture.
table_head_body = re.sub(r'\s+', '', """
<table>
<thead>
<tr>
<th>Firstname</th>
<th>Lastname</th>
<th>Age</th>
</tr>
</thead>
<tbody>
<tr>
<td>Jill</td>
<td>Smith</td>
<td>50</td>
</tr>
<tr>
<td>Eve</td>
<td>Jackson</td>
<td>94</td>
</tr>
</tbody>
</table>
""")
# HTML fixture: table with empty header/data cells, to verify empty cells
# survive as blank markdown columns.
table_missing_text = re.sub(r'\s+', '', """
<table>
<thead>
<tr>
<th></th>
<th>Lastname</th>
<th>Age</th>
</tr>
</thead>
<tbody>
<tr>
<td>Jill</td>
<td></td>
<td>50</td>
</tr>
<tr>
<td>Eve</td>
<td>Jackson</td>
<td>94</td>
</tr>
</tbody>
</table>
""")
def test_chomp():
    """Whitespace just inside <b> is moved outside the ** markers."""
    assert md(' <b></b> ') == '  '
    assert md(' <b> </b> ') == '  '
    assert md(' <b>  </b> ') == '  '
    assert md(' <b>   </b> ') == '  '
    assert md(' <b>s </b> ') == ' **s**  '
    assert md(' <b> s</b> ') == '  **s** '
    assert md(' <b> s </b> ') == '  **s**  '
    assert md(' <b>  s  </b> ') == '  **s**  '
def test_a():
    """Anchors become [text](href); self-linking URLs collapse to <url> autolinks."""
    assert md('<a href="https://google.com">Google</a>') == '[Google](https://google.com)'
    assert md('<a href="https://google.com">https://google.com</a>', autolinks=False) == '[https://google.com](https://google.com)'
    assert md('<a href="https://google.com">https://google.com</a>') == '<https://google.com>'
    assert md('<a href="https://community.kde.org/Get_Involved">https://community.kde.org/Get_Involved</a>') == '<https://community.kde.org/Get_Involved>'
    # With autolinks off, underscores in the link text are escaped.
    assert md('<a href="https://community.kde.org/Get_Involved">https://community.kde.org/Get_Involved</a>', autolinks=False) == '[https://community.kde.org/Get\\_Involved](https://community.kde.org/Get_Involved)'
def test_a_spaces():
    """Whitespace around anchor text is normalized outside the link."""
    assert md('foo <a href="http://google.com">Google</a> bar') == 'foo [Google](http://google.com) bar'
    assert md('foo<a href="http://google.com"> Google</a> bar') == 'foo [Google](http://google.com) bar'
    assert md('foo <a href="http://google.com">Google </a>bar') == 'foo [Google](http://google.com) bar'
    assert md('foo <a href="http://google.com"></a> bar') == 'foo  bar'
def test_a_with_title():
    """Double quotes inside a title attribute are backslash-escaped in markdown."""
    text = md('<a href="http://google.com" title="The "Goog"">Google</a>')
    assert text == r'[Google](http://google.com "The \"Goog\"")'
def test_a_shortcut():
    """A link whose text equals its href becomes an <url> autolink."""
    text = md('<a href="http://google.com">http://google.com</a>')
    assert text == '<http://google.com>'
def test_a_no_autolinks():
    """autolinks=False forces the full [text](href) form."""
    text = md('<a href="http://google.com">http://google.com</a>', autolinks=False)
    assert text == '[http://google.com](http://google.com)'
def test_b():
    """<b> renders as ** strong emphasis."""
    assert md('<b>Hello</b>') == '**Hello**'
def test_b_spaces():
    """Whitespace adjacent to <b> content is moved outside the markers."""
    assert md('foo <b>Hello</b> bar') == 'foo **Hello** bar'
    assert md('foo<b> Hello</b> bar') == 'foo **Hello** bar'
    assert md('foo <b>Hello </b>bar') == 'foo **Hello** bar'
    assert md('foo <b></b> bar') == 'foo  bar'
def test_blockquote():
    """Blockquotes are prefixed with '> '."""
    assert md('<blockquote>Hello</blockquote>') == '\n> Hello\n\n'
def test_blockquote_with_paragraph():
    """A paragraph following a blockquote starts outside the quote."""
    assert md('<blockquote>Hello</blockquote><p>handsome</p>') == '\n> Hello\n\nhandsome\n\n'
def test_nested_blockquote():
    """Nested blockquotes stack '> ' prefixes."""
    text = md('<blockquote>And she was like <blockquote>Hello</blockquote></blockquote>')
    assert text == '\n> And she was like \n> > Hello\n> \n> \n\n'
def test_br():
    """<br /> becomes the two-trailing-space hard line break by default."""
    assert md('a<br />b<br />c') == 'a  \nb  \nc'
def test_em():
    """<em> renders as * emphasis."""
    assert md('<em>Hello</em>') == '*Hello*'
def test_em_spaces():
    """Whitespace adjacent to <em> content is moved outside the markers."""
    assert md('foo <em>Hello</em> bar') == 'foo *Hello* bar'
    assert md('foo<em> Hello</em> bar') == 'foo *Hello* bar'
    assert md('foo <em>Hello </em>bar') == 'foo *Hello* bar'
    assert md('foo <em></em> bar') == 'foo  bar'
def test_h1():
    """Default heading style is setext (underlined) for h1."""
    assert md('<h1>Hello</h1>') == 'Hello\n=====\n\n'
def test_h2():
    """Default heading style is setext (underlined) for h2."""
    assert md('<h2>Hello</h2>') == 'Hello\n-----\n\n'
def test_hn():
    """h3..h6 always use ATX '#' prefixes (no setext form exists)."""
    assert md('<h3>Hello</h3>') == '### Hello\n\n'
    assert md('<h6>Hello</h6>') == '###### Hello\n\n'
def test_hn_chained():
    """Consecutive headings keep their separating blank lines."""
    assert md('<h1>First</h1>\n<h2>Second</h2>\n<h3>Third</h3>', heading_style=ATX) == '# First\n\n\n## Second\n\n\n### Third\n\n'
    assert md('X<h1>First</h1>', heading_style=ATX) == 'X# First\n\n'
def test_hn_nested_tag_heading_style():
    """Block children inside a heading are flattened into its text."""
    assert md('<h1>A <p>P</p> C </h1>', heading_style=ATX_CLOSED) == '# A P C #\n\n'
    assert md('<h1>A <p>P</p> C </h1>', heading_style=ATX) == '# A P C\n\n'
def test_hn_nested_simple_tag():
    """Inline children keep their markers inside a heading; block children are
    flattened to plain text; <br> collapses to a space."""
    tag_to_markdown = [
        ("strong", "**strong**"),
        ("b", "**b**"),
        ("em", "*em*"),
        ("i", "*i*"),
        ("p", "p"),
        ("a", "a"),
        ("div", "div"),
        ("blockquote", "blockquote"),
    ]
    for tag, markdown in tag_to_markdown:
        assert md('<h3>A <' + tag + '>' + tag + '</' + tag + '> B</h3>') == '### A ' + markdown + ' B\n\n'
    assert md('<h3>A <br>B</h3>', heading_style=ATX) == '### A B\n\n'
    # Nested lists not supported
    # assert md('<h3>A <ul><li>li1</i><li>l2</li></ul></h3>', heading_style=ATX) == '### A li1 li2 B\n\n'
def test_hn_nested_img():
    """Images inside headings contribute only their alt text."""
    assert md('<img src="/path/to/img.jpg" alt="Alt text" title="Optional title" />') == ''
    assert md('<img src="/path/to/img.jpg" alt="Alt text" />') == ''
    image_attributes_to_markdown = [
        ("", ""),
        ("alt='Alt Text'", "Alt Text"),
        ("alt='Alt Text' title='Optional title'", "Alt Text"),
    ]
    for image_attributes, markdown in image_attributes_to_markdown:
        assert md('<h3>A <img src="/path/to/img.jpg " ' + image_attributes + '/> B</h3>') == '### A ' + markdown + ' B\n\n'
def test_hr():
    """<hr> becomes a --- thematic break surrounded by blank lines."""
    assert md('Hello<hr>World') == 'Hello\n\n---\n\nWorld'
    assert md('Hello<hr />World') == 'Hello\n\n---\n\nWorld'
    assert md('<p>Hello</p>\n<hr>\n<p>World</p>') == 'Hello\n\n\n\n\n---\n\n\nWorld\n\n'
def test_head():
    """<head> is treated as a transparent container."""
    assert md('<head>head</head>') == 'head'
def test_atx_headings():
    """heading_style=ATX emits open '#' prefixes."""
    assert md('<h1>Hello</h1>', heading_style=ATX) == '# Hello\n\n'
    assert md('<h2>Hello</h2>', heading_style=ATX) == '## Hello\n\n'
def test_atx_closed_headings():
    """heading_style=ATX_CLOSED also appends trailing '#' markers."""
    assert md('<h1>Hello</h1>', heading_style=ATX_CLOSED) == '# Hello #\n\n'
    assert md('<h2>Hello</h2>', heading_style=ATX_CLOSED) == '## Hello ##\n\n'
def test_i():
    """<i> renders as * emphasis, same as <em>."""
    assert md('<i>Hello</i>') == '*Hello*'
def test_ol():
    """Ordered lists number their items; the start attribute offsets numbering."""
    expected_default = '1. a\n2. b\n'
    assert md('<ol><li>a</li><li>b</li></ol>') == expected_default
    expected_offset = '3. a\n4. b\n'
    assert md('<ol start="3"><li>a</li><li>b</li></ol>') == expected_offset
def test_p():
    """Paragraphs end with a blank line."""
    assert md('<p>hello</p>') == 'hello\n\n'
def test_strong():
    """<strong> renders as ** strong emphasis."""
    assert md('<strong>Hello</strong>') == '**Hello**'
def test_ul():
    """Unordered lists use '*' bullets by default."""
    assert md('<ul><li>a</li><li>b</li></ul>') == '* a\n* b\n'
def test_nested_ols():
    """Nested ordered lists restart numbering per level, indented with tabs."""
    assert md(nested_ols) == '\n1. 1\n\t1. a\n\t\t1. I\n\t\t2. II\n\t\t3. III\n\t2. b\n\t3. c\n2. 2\n3. 3\n'
def test_inline_ul():
    """A list between paragraphs is separated by blank lines."""
    assert md('<p>foo</p><ul><li>a</li><li>b</li></ul><p>bar</p>') == 'foo\n\n* a\n* b\n\nbar\n\n'
def test_nested_uls():
    """
    Nested ULs should alternate bullet characters.
    """
    assert md(nested_uls) == '\n* 1\n\t+ a\n\t\t- I\n\t\t- II\n\t\t- III\n\t+ b\n\t+ c\n* 2\n* 3\n'
def test_bullets():
    """A custom bullets string fixes the bullet character at every level."""
    assert md(nested_uls, bullets='-') == '\n- 1\n\t- a\n\t\t- I\n\t\t- II\n\t\t- III\n\t- b\n\t- c\n- 2\n- 3\n'
def test_li_text():
    """Inline markup inside list items is preserved; trailing spaces trimmed."""
    assert md('<ul><li>foo <a href="#">bar</a></li><li>foo bar  </li><li>foo <b>bar</b>   <i>space</i>.</ul>') == '* foo [bar](#)\n* foo bar\n* foo **bar** *space*.\n'
def test_img():
    """Standalone images produce no output by default."""
    assert md('<img src="/path/to/img.jpg" alt="Alt text" title="Optional title" />') == ''
    assert md('<img src="/path/to/img.jpg" alt="Alt text" />') == ''
def test_div():
    """A stray closing div is ignored; content flows through."""
    assert md('Hello</div> World') == 'Hello World'
def test_table():
    """All three table fixtures render to the same pipe-table markdown."""
    assert md(table) == '| Firstname | Lastname | Age |\n| --- | --- | --- |\n| Jill | Smith | 50 |\n| Eve | Jackson | 94 |'
    assert md(table_head_body) == '| Firstname | Lastname | Age |\n| --- | --- | --- |\n| Jill | Smith | 50 |\n| Eve | Jackson | 94 |'
    assert md(table_missing_text) == '|  | Lastname | Age |\n| --- | --- | --- |\n| Jill |  | 50 |\n| Eve | Jackson | 94 |'
def test_strong_em_symbol():
    """strong_em_symbol=UNDERSCORE swaps '*' markers for '_'."""
    assert md('<strong>Hello</strong>', strong_em_symbol=UNDERSCORE) == '__Hello__'
    assert md('<b>Hello</b>', strong_em_symbol=UNDERSCORE) == '__Hello__'
    assert md('<em>Hello</em>', strong_em_symbol=UNDERSCORE) == '_Hello_'
    assert md('<i>Hello</i>', strong_em_symbol=UNDERSCORE) == '_Hello_'
def test_newline_style():
    """newline_style=BACKSLASH emits backslash hard line breaks for <br>."""
    assert md('a<br />b<br />c', newline_style=BACKSLASH) == 'a\\\nb\\\nc'
|
nilq/baby-python
|
python
|
from Assignment2 import *
def test_case1():
    """Small adjacency-matrix graph (row 0 unused, -1 = no edge);
    prints the UCS result from node 1 to goal 3."""
    cost = [[0,0,0,0],
            [0,0,5,10],
            [0,-1,0,5],
            [0,-1,-1,0]
            ]
    print(UCS_Traversal(cost,1,[3]))
def test_case2():
    """Graph where the cheapest path to 3 goes through an extra hop."""
    cost = [[0,0,0,0,0],
            [0,0,0,10,5],
            [0,-1,0,5,0],
            [0,-1,-1,0,0],
            [0,-1,-1,5,0]
            ]
    print(UCS_Traversal(cost,1,[3]))
def test_case3():
    """Graph with goal 5 reachable directly or via 2->3->6."""
    cost = [[0,0,0,0,0,0,0],
            [0,0,2,0,0,10,7],
            [0,0,0,3,0,0,0],
            [0,0,0,0,0,0,2],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,3,0],
            ]
    print(UCS_Traversal(cost,1,[5]))
def test_case4():
    """Variant of case 3 with an additional 3->4->5 route."""
    cost = [[0,0,0,0,0,0,0],
            [0,0,2,0,0,10,7],
            [0,0,0,3,0,0,0],
            [0,0,0,0,2,0,2],
            [0,0,0,0,0,3,0],
            [0,0,0,0,0,0,0],
            [0,0,0,0,0,3,0],
            ]
    print(UCS_Traversal(cost,1,[5]))
def test_case5():
    """Chain graph 1->2->3->4->6->5 competing with the direct edge 1->5."""
    cost = [[0,0,0,0,0,0,0],
            [0,0,2,-1,-1,10,-1],
            [0,-1,0,2,-1,-1,-1],
            [0,-1,-1,0,2,-1,-1],
            [0,-1,-1,-1,0,-1,2],
            [0,-1,-1,-1,-1,0,-1],
            [0,-1,-1,-1,-1,2,0]
            ]
    print(UCS_Traversal(cost,1,[5]))
# Run every test case when the module is executed.
test_case1()
test_case2()
test_case3()
test_case4()
test_case5()
|
nilq/baby-python
|
python
|
import numpy as np
import sys, time, glob
#caffe_root = "/home/vagrant/caffe/"
#sys.path.insert(0, caffe_root + 'python')
import caffe
from sklearn.metrics import accuracy_score
from random import shuffle
from sklearn import svm
def init_net(caffe_root='/home/vagrant/caffe/'):
    """Build and configure a CPU-mode reference CaffeNet classifier.

    Args:
        caffe_root: path to the Caffe checkout containing the model files
            and the ImageNet mean. BUG FIX: `caffe_root` was previously an
            undefined global (the line defining it was commented out), so
            calling init_net() raised a NameError; it is now a defaulted
            parameter, which is backward compatible.

    Returns:
        The configured caffe.Classifier instance.
    """
    net = caffe.Classifier(caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt',
                           caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
    net.set_phase_test()
    net.set_mode_cpu()
    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    net.set_mean('data', np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy'))  # ImageNet mean
    net.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
    net.set_channel_swap('data', (2,1,0))  # the reference model has channels in BGR order instead of RGB
    return net
def get_features(file, net):
    """Run the image at *file* through *net* and return an fc7 feature vector.

    NOTE(review): the indexing net.blobs['fc7'].data[4][:, 0, 0] assumes a
    specific batch size and blob layout from the deploy prototxt -- confirm
    against the model configuration before reusing.
    """
    #print "getting features for", file
    scores = net.predict([caffe.io.load_image(file)])
    feat = net.blobs['fc7'].data[4][:,0, 0]
    return feat
def shuffle_data(features, labels):
    """Return (features, labels) shuffled in unison, pairs kept aligned.

    BUG FIX: random.shuffle requires a mutable sequence; on Python 3,
    range(...) is immutable and shuffle(range(...)) raises TypeError.
    Materializing the index list works on both Python 2 and 3.
    """
    new_features, new_labels = [], []
    index_shuf = list(range(len(features)))
    shuffle(index_shuf)
    for i in index_shuf:
        new_features.append(features[i])
        new_labels.append(labels[i])
    return new_features, new_labels
def get_dataset(net, A_DIR, B_DIR):
    """Extract CNN features for every .jpg in the two class directories.

    Returns a shuffled (features, labels) pair with label 0 for class A
    images and 1 for class B images.

    BUG FIX (py3 compatibility): map() returns a lazy iterator on Python 3,
    which breaks the `+` concatenation and len() below; list comprehensions
    give the same list semantics on both Python 2 and 3.
    """
    CLASS_A_IMAGES = glob.glob(A_DIR + "/*.jpg")
    CLASS_B_IMAGES = glob.glob(B_DIR + "/*.jpg")
    CLASS_A_FEATURES = [get_features(f, net) for f in CLASS_A_IMAGES]
    CLASS_B_FEATURES = [get_features(f, net) for f in CLASS_B_IMAGES]
    features = CLASS_A_FEATURES + CLASS_B_FEATURES
    labels = [0] * len(CLASS_A_FEATURES) + [1] * len(CLASS_B_FEATURES)
    return shuffle_data(features, labels)
# --- Script entry: train an SVM on CNN features from two image folders ---
# Usage (Python 2 only -- note the print statement): script.py A_DIR B_DIR
net = init_net()
x, y = get_dataset(net, sys.argv[1], sys.argv[2])
# 40% train / 60% test split over the already-shuffled data.
l = int(len(y) * 0.4)
x_train, y_train = x[: l], y[: l]
x_test, y_test = x[l : ], y[l : ]
clf = svm.SVC()
clf.fit(x_train, y_train)
y_pred = clf.predict(x_test)
print "Accuracy: %.3f" % accuracy_score(y_test, y_pred)
|
nilq/baby-python
|
python
|
import numpy as np
import py2neo as pn
import random
import math
import copy
def create_ba_subgraph(M, m_0, n, relation):
    """Build a Barabasi-Albert-style py2neo Subgraph of M 'User' nodes.

    Starts from a complete graph on m_0 seed users, then adds the remaining
    M - m_0 users one at a time, each attaching n edges preferentially to
    high-degree nodes. Edge direction is randomized per attachment.

    Args:
        M: total number of user nodes.
        m_0: number of fully connected seed nodes.
        n: edges added per new node (must be <= current node count).
        relation: relationship type, e.g. 'friends' or 'follow'.
    Returns:
        pn.Subgraph of all nodes and relationships (not yet persisted).
    """
    # Generate Users-Friends un-directed subgraph
    # Generate Users-Follower directed subgraph
    node_list = []
    for i in range(m_0):
        node_list.append(pn.Node('User', name='user' + str(i)))
    rel_list = []
    # Seed phase: connect every pair of the m_0 initial users.
    for i in range(len(node_list)):
        for j in range((i+1),len(node_list)):
            rel_list.append(pn.Relationship(node_list[i], relation, node_list[j]))
    # 1. Create node list by Node function
    # 2. Create relation list by Relationship function
    # 3. Create subgraph by Subgraph function
    t = M - m_0 # number of iterations (growth steps)
    k = [] # per-node degree
    k_0 = m_0 - 1  # seed nodes start fully connected
    p_k = [] # per-node preferential-attachment probability
    p_0 = 1/m_0
    k_all = 0  # running total degree (normalization denominator)
    for i in range(m_0):
        p_k.append(p_0)
        k.append(k_0)
        k_all += k_0
    for i in range(t):
        m_0_t = m_0 + i # number of nodes at time t
        m_0_1 = m_0 + i - 1 # number of nodes at time t-1 (unused)
        node_list.append(pn.Node('User', name='user' + str(m_0_t))) # add one node
        add_edge = 1
        j_choose = -1
        # Keep sweeping existing nodes until n edges have been attached.
        while(add_edge <= n):
            for j in range(m_0_t):
                if j != j_choose: # to ensure no repeated edges
                    # NOTE(review): this only avoids repeating the single most
                    # recently chosen node, not all previously chosen ones.
                    p_random = random.random()
                    if p_random <= p_k[j] and add_edge <= n:
                        j_choose = j
                        k_j = k[j]
                        p_k_j = p_k[j]
                        # Randomize edge direction (matters for 'follow').
                        r_random = random.random()
                        if r_random > 0.5:
                            rel_list.append(pn.Relationship(node_list[j], relation, node_list[-1]))
                        else:
                            rel_list.append(pn.Relationship(node_list[-1], relation, node_list[j]))
                        add_edge += 1
                        k[j] = k_j + 1
                        k_all += 2  # each edge adds one degree at both endpoints
                        p_k[j] = (k_j + 1)/k_all
        k.append(n)
        p_k.append(n/k_all)
    s = pn.Subgraph(node_list,rel_list)
    return s
def create_pp_ba_subgraph(M, m_0, n, post_u0, N, graph):
    """Build a Subgraph of M 'Post' nodes linked to users via 'published'.

    The m_0 seed posts are assigned to random seed users; each later post is
    assigned preferentially to users with many friends, followers and
    previous posts (counts queried live from *graph*).

    Args:
        M: total number of posts; m_0: number of seed posts.
        n, post_u0: sizing parameters (post_u0 unused beyond the signature).
        N: iterable of user ids; graph: connected py2neo Graph.
    """
    post_u0_list = np.random.choice(N,m_0)
    post_0_list = list(range(m_0))
    user_list = [pn.Node('User', name='user' + str(i)) for i in N]
    k = [0] * len(N) # per-user count of published posts
    node_list = []
    for i in post_0_list:
        node_list.append(pn.Node('Post', name='post' + str(i)))
    rel_list = []
    # Seed phase: attach each seed post to a randomly drawn seed user.
    for i in range(len(node_list)):
        p_u = np.random.choice(post_u0_list,1)[0]
        k[p_u] += 1
        rel_list.append(pn.Relationship(user_list[p_u], 'published', node_list[i]))
    # 1. Create node list by Node function
    # 2. Create relation list by Relationship function
    # 3. Create subgraph by Subgraph function
    t = M - m_0 # number of growth iterations
    # Total friend-edge count across all users (from the live graph).
    k_all_friends = 0
    for i in N:
        friends = list(graph.run('MATCH (n:User {name:"user' + str(i) + '"})-[:friends]-(a) return count(a)').data()[0].values())[0]
        k_all_friends += friends
    # Total follower count across all users.
    k_all_follow = 0
    for i in N:
        follow = list(graph.run('MATCH (n:User {name:"user' + str(i) + '"})<-[:follow]-(a) return count(a)').data()[0].values())[0]
        k_all_follow += follow
    for i in range(t):
        m_0_t = m_0 + i # number of post nodes at time t
        node_list.append(pn.Node('Post', name='post' + str(m_0_t))) # add one node
        p_j = [0] * len(N) # attachment probability per user
        for j in N:
            p_j_friends = list(graph.run('MATCH (n:User {name:"user' + str(j) + '"})-[:friends]-(a) return count(a)').data()[0].values())[0]
            p_j_follow = list(graph.run('MATCH (n:User {name:"user' + str(j) + '"})<-[:follow]-(a) return count(a)').data()[0].values())[0]
            p_j[j] = (p_j_friends + p_j_follow + k[j]) / (k_all_follow + k_all_friends + sum(k))
        # NOTE(review): np.random.choice requires p to sum to 1 -- confirm the
        # normalization above guarantees that.
        user = np.random.choice(user_list,1, p=p_j)[0] # roulette wheel selection
        rel_list.append(pn.Relationship(user, 'published', node_list[-1]))
        k[user_list.index(user)] += 1
    s = pn.Subgraph(node_list,rel_list)
    return s
def create_pv_ba_subgraph(P,N,graph):
    """Build a Subgraph of 'viewed' relationships from users to posts.

    A post is viewed with probability proportional to its author's friends,
    followers and the post's accumulated views (queried from *graph*).
    The author is excluded from the candidate viewers.
    """
    # depends on friends/followers/reads
    user_list = [pn.Node('User', name='user' + str(i)) for i in N]
    post_list = [pn.Node('Post', name='post' + str(i)) for i in P]
    view_list = [0] * len(P)  # per-post view count
    rel_list = []
    # First pass: total friend/follower counts over all post authors.
    k_all_friends = 0
    k_all_follow = 0
    for i in P:
        user = list(graph.run('match (n:Post{name:"post' + str(i) + '"})<-[:published]-(a) return a').data()[0].values())[0].nodes[0]['name']
        friends = list(graph.run('MATCH (n:User {name:"' + user + '"})-[:friends]-(a) return count(a)').data()[0].values())[0]
        k_all_friends += friends
        follow = list(graph.run('MATCH (n:User {name:"' + user + '"})<-[:follow]-(a) return count(a)').data()[0].values())[0]
        k_all_follow += follow
    # Second pass: decide per post whether a (non-author) user views it.
    for i in P:
        user = list(graph.run('match (n:Post{name:"post' + str(i) + '"})<-[:published]-(a) return a').data()[0].values())[0].nodes[0]['name']
        user_list_m = copy.deepcopy(user_list)
        # NOTE(review): removing from the list while iterating it is fragile;
        # it works here only because at most one element matches.
        for n in user_list_m:
            if n['name'] == user:
                user_list_m.remove(n)
        friends = list(graph.run('MATCH (n:User {name:"' + user + '"})-[:friends]-(a) return count(a)').data()[0].values())[0]
        follow = list(graph.run('MATCH (n:User {name:"' + user + '"})<-[:follow]-(a) return count(a)').data()[0].values())[0]
        p = (friends + follow + view_list[i]) / (k_all_friends + k_all_follow + sum(view_list))
        p_random = random.random()
        if p_random <= p:
            user_choice = np.random.choice(user_list_m,1)[0]
            rel_list.append(pn.Relationship(user_choice,'viewed',post_list[i]))
            view_list[i] += 1
    s = pn.Subgraph(post_list,rel_list)
    return s
def create_pl_ba_subgraph(P,N,graph):
    """Build a Subgraph of 'liked' relationships from users to posts.

    Like create_pv_ba_subgraph, but the preference score additionally
    includes the author's post count, and each post gets up to len(N)
    independent like attempts.
    """
    user_list = [pn.Node('User', name='user' + str(i)) for i in N]
    post_list = [pn.Node('Post', name='post' + str(i)) for i in P]
    liked_list = [0] * len(P)  # per-post like count
    rel_list = []
    # First pass: total friend/follower/post counts over all post authors.
    k_all_friends = 0
    k_all_follow = 0
    k_all_posted = 0
    for i in P:
        user = list(graph.run('match (n:Post{name:"post' + str(i) + '"})<-[:published]-(a) return a').data()[0].values())[0].nodes[0]['name']
        friends = list(graph.run('MATCH (n:User {name:"' + user + '"})-[:friends]-(a) return count(a)').data()[0].values())[0]
        k_all_friends += friends
        follow = list(graph.run('MATCH (n:User {name:"' + user + '"})<-[:follow]-(a) return count(a)').data()[0].values())[0]
        k_all_follow += follow
        post = list(graph.run('MATCH (n:User {name:"' + user + '"})-[:published]->(a) return count(a)').data()[0].values())[0]
        k_all_posted += post
    # Second pass: per post, run one like attempt per user id in N.
    for i in P:
        user = list(graph.run('match (n:Post{name:"post' + str(i) + '"})<-[:published]-(a) return a').data()[0].values())[0].nodes[0]['name']
        user_list_m = copy.deepcopy(user_list)
        # Exclude the author from potential likers (see NOTE in the viewed
        # builder about removing while iterating).
        for n in user_list_m:
            if n['name'] == user:
                user_list_m.remove(n)
        friends = list(graph.run('MATCH (n:User {name:"' + user + '"})-[:friends]-(a) return count(a)').data()[0].values())[0]
        follow = list(graph.run('MATCH (n:User {name:"' + user + '"})<-[:follow]-(a) return count(a)').data()[0].values())[0]
        post = list(graph.run('MATCH (n:User {name:"' + user + '"})-[:published]->(a) return count(a)').data()[0].values())[0]
        for j in N:
            # j is only a repetition counter; p is recomputed as liked_list
            # grows within the loop.
            p = (friends + follow + post + liked_list[i]) / (k_all_friends + k_all_follow + k_all_posted + sum(liked_list))
            p_random = random.random()
            if p_random <= p:
                user_choice = np.random.choice(user_list_m,1)[0]
                rel_list.append(pn.Relationship(user_choice,'liked',post_list[i]))
                liked_list[i] += 1
    s = pn.Subgraph(post_list,rel_list)
    return s
if __name__ == '__main__':
    # Simulation parameters.
    users = 20 # users nodes
    posts = 30 # posts
    post_0, post_u0 = 3,3  # seed posts / seed post-authors
    m_0 = 3 # initial nodes
    n = 2 # every time the new node connect to n known nodes, n<=n_user0
    N = list(range(users))
    P = list(range(posts))
    # start a new project in Neo4j and set connections
    graph = pn.Graph(
        host = 'localhost',
        http_port = '7474',
        user = 'neo4j',
        password = '2500'
    )
    # stage 1: build the undirected friendship network
    s_user_friend = create_ba_subgraph(users, m_0, n, relation='friends')
    # stage 2: build the directed follower network
    s_user_follow = create_ba_subgraph(users, m_0, n, relation='follow')
    # Clear any previous run, then persist the friendship graph.
    graph.run('match (n:User) detach delete n')
    graph.create(s_user_friend)
    # stage 3: merge follow edges onto existing User nodes (keyed by name)
    graph.merge(s_user_follow,'User','name')
    # stage 4: generate the posts and their 'published' edges
    s_posts_publish = create_pp_ba_subgraph(posts, post_0, n, post_u0, N, graph)
    # stage 5
    graph.merge(s_posts_publish,'User','name')
    # stage 6: generate 'viewed' edges
    s_posts_viewed = create_pv_ba_subgraph(P,N,graph)
    # stage 7
    graph.merge(s_posts_viewed,'User','name')
    # stage 8: generate 'liked' edges
    s_posts_liked = create_pl_ba_subgraph(P,N,graph)
    # stage 9
    graph.merge(s_posts_liked,'User','name')
|
nilq/baby-python
|
python
|
# coding=utf-8
from __future__ import print_function
import authcode
from sqlalchemy_wrapper import SQLAlchemy
from helpers import SECRET_KEY
def test_user_model():
    """The generated User model exposes login/email/password fields and repr."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db, roles=True)
    assert auth.users_model_name == 'User'
    assert auth.roles_model_name == 'Role'
    User = auth.User
    db.create_all()
    user = User(login=u'meh', password='foobar')
    db.session.add(user)
    db.commit()
    assert user.login == u'meh'
    # With no separate email column configured, email mirrors login.
    assert user.email == user.login
    assert hasattr(user, 'password')
    assert hasattr(user, 'last_sign_in')
    assert repr(user) == '<User meh>'
def test_user_model_to_dict():
    """User instances serialize via to_dict()."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db, roles=True)
    User = auth.User
    db.create_all()
    user = User(login=u'meh', password='foobar')
    db.session.add(user)
    db.commit()
    user_dict = user.to_dict()
    assert user_dict
def test_backwards_compatibility():
    """The legacy _password attribute aliases the password property."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db)
    User = auth.User
    db.create_all()
    user = User(login=u'meh', password='foobar')
    db.session.add(user)
    db.commit()
    assert user._password == user.password
    # Writing _password bypasses hashing (legacy behavior).
    user._password = 'raw'
    assert user.password == 'raw'
def test_user_model_methods():
    """by_id/by_login lookups, password checks and token helpers work."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db)
    User = auth.User
    db.create_all()
    user = User(login=u'meh', password='foobar')
    db.session.add(user)
    db.commit()
    assert User.by_id(user.id) == user
    assert User.by_id(33) is None
    assert User.by_login(u'meh') == user
    assert User.by_login(u'foobar') is None
    assert user.has_password('foobar')
    assert not user.has_password('abracadabra')
    assert user.get_token()
    assert user.get_uhmac()
def test_set_raw_password():
    """set_raw_password stores the value verbatim, skipping the hash."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db, roles=True)
    User = auth.User
    db.create_all()
    user = User(login=u'meh', password='foobar')
    db.session.add(user)
    db.session.commit()
    # Normal assignment hashes the password.
    assert user.password != 'foobar'
    user.set_raw_password('foobar')
    assert user.password == 'foobar'
def test_role_model():
    """The generated Role model exposes name and a readable repr."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db, roles=True)
    Role = auth.Role
    db.create_all()
    role = Role(name=u'admin')
    db.session.add(role)
    db.commit()
    assert role.name == u'admin'
    assert repr(role) == '<Role admin>'
def test_role_model_to_dict():
    """Role instances serialize via to_dict()."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db, roles=True)
    Role = auth.Role
    db.create_all()
    role = Role(name=u'admin')
    db.session.add(role)
    db.commit()
    role_dict = role.to_dict()
    assert role_dict
def test_role_model_methods():
    """by_id/by_name lookups and get_or_create behave as expected."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db, roles=True)
    Role = auth.Role
    db.create_all()
    role = Role(name=u'admin')
    db.session.add(role)
    db.commit()
    assert Role.by_id(role.id) == role
    assert Role.by_id(33) is None
    assert Role.by_name(u'admin') == role
    assert Role.by_name(u'foobar') is None
    # get_or_create returns the existing row for a known name...
    assert Role.get_or_create(u'admin') == role
    # ...and creates a new one otherwise.
    role2 = Role.get_or_create(u'owner')
    db.commit()
    assert role2 != role
    assert db.query(Role).count() == 2
    assert list(role.users) == []
    assert list(role2.users) == []
def test_add_role():
    """User.add_role creates missing roles, is idempotent, and reuses existing ones."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db, roles=True)
    User = auth.User
    Role = auth.Role
    db.create_all()
    user = User(login=u'meh', password='foobar')
    db.session.add(user)
    role = Role(name=u'loremipsum')
    db.session.add(role)
    db.session.commit()
    assert hasattr(auth, 'Role')
    assert hasattr(User, 'roles')
    # Add nonexistant role creates it
    user.add_role('admin')
    db.session.commit()
    assert user.has_role('admin')
    assert db.query(Role).count() == 2
    assert list(user.roles) == [Role.by_name('admin')]
    # Adding the same role does nothing
    user.add_role('admin')
    db.session.commit()
    assert user.has_role('admin')
    assert db.query(Role).count() == 2
    assert list(user.roles) == [Role.by_name('admin')]
    # Adding an existent role does not create a new one
    user.add_role('loremipsum')
    db.session.commit()
    assert user.has_role('loremipsum')
    result = sorted([role.name for role in user.roles])
    assert result == ['admin', 'loremipsum']
    assert db.query(Role).count() == 2
def test_remove_role():
    """User.remove_role detaches (but never deletes) roles and tolerates unknown names."""
    db = SQLAlchemy('sqlite:///:memory:')
    auth = authcode.Auth(SECRET_KEY, db=db, roles=True)
    User = auth.User
    Role = auth.Role
    db.create_all()
    user = User(login=u'meh', password='foobar')
    db.session.add(user)
    db.session.commit()
    assert hasattr(auth, 'Role')
    assert hasattr(User, 'roles')
    user.add_role('admin')
    db.session.commit()
    assert user.has_role('admin')
    assert db.query(Role).count() == 1
    # Removed from user but not deleted
    user.remove_role('admin')
    db.session.commit()
    assert not user.has_role('admin')
    assert list(user.roles) == []
    assert db.query(Role).count() == 1
    # Removing a role it doesn't have does nothing
    user.remove_role('admin')
    db.session.commit()
    assert not user.has_role('admin')
    assert list(user.roles) == []
    assert db.query(Role).count() == 1
    # Removing a nonexistant role does nothing
    user.remove_role('foobar')
    db.session.commit()
    assert db.query(Role).count() == 1
def test_models_mixins():
    """UserMixin/RoleMixin add columns and can override dunder methods."""
    db = SQLAlchemy('sqlite:///:memory:')
    class UserMixin(object):
        # Extra column mixed into the generated User model.
        email = db.Column(db.Unicode(300))
        def __repr__(self):
            return 'overwrited'
    class RoleMixin(object):
        # Extra column mixed into the generated Role model.
        description = db.Column(db.UnicodeText)
    auth = authcode.Auth(SECRET_KEY, db=db, UserMixin=UserMixin, RoleMixin=RoleMixin)
    User = auth.User
    Role = auth.Role
    db.create_all()
    user = User(login=u'meh', password='foobar', email=u'text@example.com')
    db.session.add(user)
    db.flush()  # NOTE(review): flush on the wrapper, not db.session.flush() — confirm wrapper API
    assert User.__tablename__ == 'users'
    assert user.login == u'meh'
    assert user.email == u'text@example.com'
    assert hasattr(user, 'password')
    assert hasattr(user, 'last_sign_in')
    assert repr(user) == 'overwrited'
    assert hasattr(Role, 'description')
def test_naked_sqlalchemy():
    """authcode also works with a plain SQLAlchemy session/declarative setup."""
    from sqlalchemy import create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import scoped_session, sessionmaker
    engine = create_engine('sqlite://')
    class DB(object):
        # Minimal stand-in exposing the session/Model attributes authcode expects.
        Session = scoped_session(sessionmaker(bind=engine))
        Model = declarative_base()
        @property
        def session(self):
            return self.Session()
    db = DB()
    auth = authcode.Auth(SECRET_KEY, db=db)
    User = auth.User
    db.Model.metadata.create_all(bind=engine)
    user = User(login=u'meh', password='foobar')
    db.session.add(user)
    db.session.commit()
    assert User.by_id(user.id) == user
    assert User.by_id(33) is None
    assert User.by_login(u'meh') == user
    assert User.by_login(u'foobar') is None
    assert user.has_password('foobar')
    assert not user.has_password('abracadabra')
    assert user.get_token()
    assert user.get_uhmac()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
def fluxo_caixa():
    """List all MovimentoCaixa (cash ledger) rows for the view.

    NOTE: db, SQLFORM, request, response, redirect and URL come from the
    web2py global controller environment, not from explicit imports.
    """
    fluxo = db(MovimentoCaixa).select()
    return locals()
def entradas():
    """List all Entradas (cash inflow) rows for the view."""
    entradas = db(Entradas).select()  # local name matches the view variable exposed via locals()
    return locals()
def n_entrada():
    """Create or edit a cash inflow (Entradas) and log it to MovimentoCaixa."""
    entrada_id = request.vars.entrada
    if entrada_id is None:
        # No id given: blank form to create a new record.
        form = SQLFORM(Entradas, fields=['descricao', 'valor', 'obs', 'created_on'])
    else:
        form = SQLFORM(Entradas, entrada_id, showid=False, deletable=True,
                       fields=['descricao', 'valor', 'obs', 'created_on'])
    if form.process().accepted:
        valor = float(request.vars.valor)  # NOTE(review): raises ValueError on malformed input — confirm form validation upstream
        saldo = soma_saldo(valor)  # presumably (saldo_inicial, saldo_final); soma_saldo defined elsewhere — verify
        MovimentoCaixa.insert(saldo_inicial=saldo[0], entrada=valor,
                              saida=0, saldo_final=saldo[1])
        redirect(URL('financeiro', 'entradas'))
    elif form.errors:
        response.flash = 'Ops, confira os campos!'
    return locals()
def saidas():
    """List all Saidas (cash outflow) rows for the view."""
    saidas = db(Saidas).select()
    return locals()
def n_saida():
    """Create or edit a cash outflow (Saidas) record."""
    saida_id = request.vars.saida_id
    if saida_id is None:
        form = SQLFORM(Saidas, fields=['descricao', 'valor', 'obs', 'created_on'])
    else:
        form = SQLFORM(Saidas, saida_id, showid=False, deletable=True,
                       fields=['descricao', 'valor', 'obs', 'created_on'])
    if form.process().accepted:
        # NOTE(review): unlike n_entrada, no MovimentoCaixa row is inserted here —
        # confirm whether outflows should also update the cash-flow ledger.
        redirect(URL('financeiro', 'saidas'))
    elif form.errors:
        response.flash = 'Ops, confira os campos!'
    return locals()
|
nilq/baby-python
|
python
|
# (C) British Crown Copyright 2013 - 2018, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""
Tests for the Geostationary projection.
"""
from __future__ import (absolute_import, division, print_function)
from numpy.testing import assert_almost_equal
import cartopy.crs as ccrs
def check_proj4_params(name, crs, other_args):
    """Assert that *crs* expands to exactly the expected proj4 parameters.

    *other_args* is a set of extra ``key=value`` strings; ``proj=<name>``,
    ``units=m`` and ``no_defs`` are always expected.
    """
    expected = {f'proj={name}', 'units=m', 'no_defs'} | other_args
    actual = set(crs.proj4_init.lstrip('+').split(' +'))
    assert expected == actual
class TestGeostationary(object):
    """Proj4 parameter and boundary checks for the Geostationary projection."""
    # Subclasses can swap these to reuse the same checks for related projections.
    test_class = ccrs.Geostationary
    expected_proj_name = 'geos'
    def adjust_expected_params(self, expected):
        # Only for Geostationary do we expect the sweep parameter
        if self.expected_proj_name == 'geos':
            expected.add('sweep=y')
    def test_default(self):
        """Default construction: WGS84 ellipsoid, standard satellite height."""
        geos = self.test_class()
        other_args = {'ellps=WGS84', 'h=35785831', 'lat_0=0.0', 'lon_0=0.0',
                      'x_0=0', 'y_0=0'}
        self.adjust_expected_params(other_args)
        check_proj4_params(self.expected_proj_name, geos, other_args)
        assert_almost_equal(geos.boundary.bounds,
                            (-5434177.81588539, -5434177.81588539,
                             5434177.81588539, 5434177.81588539),
                            decimal=4)
    def test_eccentric_globe(self):
        """Custom semimajor/semiminor axes propagate to a/b proj4 params."""
        globe = ccrs.Globe(semimajor_axis=10000, semiminor_axis=5000,
                           ellipse=None)
        geos = self.test_class(satellite_height=50000,
                               globe=globe)
        other_args = {'a=10000', 'b=5000', 'h=50000', 'lat_0=0.0', 'lon_0=0.0',
                      'x_0=0', 'y_0=0'}
        self.adjust_expected_params(other_args)
        check_proj4_params(self.expected_proj_name, geos, other_args)
        assert_almost_equal(geos.boundary.bounds,
                            (-8372.4040, -4171.5043, 8372.4040, 4171.5043),
                            decimal=4)
    def test_eastings(self):
        """False easting/northing shift x_0/y_0 and the boundary accordingly."""
        geos = self.test_class(false_easting=5000000,
                               false_northing=-125000,)
        other_args = {'ellps=WGS84', 'h=35785831', 'lat_0=0.0', 'lon_0=0.0',
                      'x_0=5000000', 'y_0=-125000'}
        self.adjust_expected_params(other_args)
        check_proj4_params(self.expected_proj_name, geos, other_args)
        assert_almost_equal(geos.boundary.bounds,
                            (-434177.81588539, -5559177.81588539,
                             10434177.81588539, 5309177.81588539),
                            decimal=4)
    def test_sweep(self):
        """sweep_axis='x' is honoured and affects transformed coordinates."""
        geos = ccrs.Geostationary(sweep_axis='x')
        other_args = {'ellps=WGS84', 'h=35785831', 'lat_0=0.0', 'lon_0=0.0',
                      'sweep=x', 'x_0=0', 'y_0=0'}
        check_proj4_params(self.expected_proj_name, geos, other_args)
        pt = geos.transform_point(-60, 25, ccrs.PlateCarree())
        assert_almost_equal(pt,
                            (-4529521.6442, 2437479.4195),
                            decimal=4)
|
nilq/baby-python
|
python
|
import sys, os, Jati
class Serve:
    """Run a Jati web server for a set of virtual hosts."""

    def run(self, host, port, sites, log_file, isSSL):
        """Start serving *sites* on *host*:*port* until interrupted.

        :param host: interface to bind.
        :param port: port to bind.
        :param sites: virtual-host configuration passed to Jati.addVHost.
        :param log_file: server log destination.
        :param isSSL: SSL flag forwarded to the Jati server.
        """
        # Make the working directory importable so site code can be loaded.
        current_dir = os.getcwd()
        if current_dir not in sys.path:
            sys.path.insert(1, current_dir)
        try:
            # BUG FIX: the original hard-coded isSSL=None, silently discarding
            # the caller's isSSL argument. (If SSL was deliberately disabled,
            # confirm before shipping.)
            jati = Jati.Jati(host=host, port=port, isSSL=isSSL, log_file=log_file)
            jati.addVHost(sites)
            jati.start()
        except KeyboardInterrupt:
            # Ctrl-C: shut the server down cleanly.
            print("closing")
            jati.close()
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import Classifier
# Expose the Classifier model in the Django admin site.
admin.site.register(Classifier)
|
nilq/baby-python
|
python
|
def cgi_content(type="text/html"):
    """Return the CGI response header block for the given MIME type.

    BUG FIX: the header field must be spelled ``Content-Type`` (hyphenated,
    per RFC 3875); the original emitted ``Content type:``, which is not a
    valid header name and is ignored by CGI-aware servers.
    """
    return('Content-Type: ' + type + '\n\n')
def webpage_start():
    """Return the opening ``<html>`` tag."""
    return '<html>'
def web_title(title):
    """Return the document head with *title* as the page title."""
    return f'<head><title>{title}</title></head>'
def body_start(h1_message):
    """Open the body with a centered H1 heading and a centered paragraph."""
    return f'<h1 align="center">{h1_message}</h1><p align="center">'
def body_end():
    """Close the paragraph and body, appending a centered HOME link."""
    return "</p><br><p align='center'><a href='../index.html'>HOME</a></p></body>"
def webpage_end():
    """Return the closing ``</html>`` tag."""
    return '</html>'
|
nilq/baby-python
|
python
|
"""Class to host an MlModel object."""
import logging
import json
from aiokafka import AIOKafkaProducer, AIOKafkaConsumer
from model_stream_processor import __name__
from model_stream_processor.model_manager import ModelManager
logger = logging.getLogger(__name__)
class MLModelStreamProcessor(object):
    """Processor class for MLModel stream processors."""

    def __init__(self, model_qualified_name, loop, bootstrap_servers):
        """Create an agent for a model.

        :param model_qualified_name: The qualified name of the model that will be hosted in this stream processor.
        :type model_qualified_name: str
        :param loop: The asyncio event loop to be used by the stream processor.
        :type loop: _UnixSelectorEventLoop
        :param bootstrap_servers: The kafka brokers to connect to.
        :type bootstrap_servers: str
        :returns: An instance of MLModelStreamProcessor.
        :rtype: MLModelStreamProcessor
        :raises ValueError: if the model is not registered in the ModelManager.
        """
        model_manager = ModelManager()
        self._model = model_manager.get_model(model_qualified_name)
        # BUG FIX: validate before touching self._model — the original logged
        # self._model.qualified_name first, so a missing model raised
        # AttributeError instead of the intended ValueError.
        if self._model is None:
            raise ValueError("'{}' not found in ModelManager instance.".format(model_qualified_name))
        logger.info("Initializing stream processor for model: {}".format(self._model.qualified_name))
        base_topic_name = "model_stream_processor.{}.{}.{}".format(model_qualified_name,
                                                                   self._model.major_version,
                                                                   self._model.minor_version)
        # the topic from which the model will receive prediction inputs
        self.consumer_topic = "{}.inputs".format(base_topic_name)
        # the topic to which the model will send prediction outputs
        self.producer_topic = "{}.outputs".format(base_topic_name)
        # the topic to which the model will send prediction errors
        self.error_producer_topic = "{}.errors".format(base_topic_name)
        logger.info("{} stream processor: Consuming messages from topic {}.".format(self._model.qualified_name,
                                                                                    self.consumer_topic))
        logger.info("{} stream processor: Producing messages to topics {} and {}.".format(self._model.qualified_name,
                                                                                          self.producer_topic,
                                                                                          self.error_producer_topic))
        self._consumer = AIOKafkaConsumer(self.consumer_topic, loop=loop, bootstrap_servers=bootstrap_servers,
                                          group_id=__name__)
        self._producer = AIOKafkaProducer(loop=loop, bootstrap_servers=bootstrap_servers)

    def __repr__(self):
        """Return string representation of stream processor."""
        return "{} model: {} version: {}".format(super().__repr__(), self._model.qualified_name,
                                                 str(self._model.major_version) + "." + str(self._model.minor_version))

    async def start(self):
        """Start the consumers and producers."""
        logger.info("{} stream processor: Starting consumer and producer.".format(self._model.qualified_name))
        await self._consumer.start()
        await self._producer.start()

    async def process(self):
        """Make predictions on records in a stream.

        Successful predictions go to the outputs topic; on any failure the
        original message is forwarded unchanged to the errors topic.
        """
        async for message in self._consumer:
            try:
                data = json.loads(message.value)
                prediction = self._model.predict(data=data)
                serialized_prediction = json.dumps(prediction).encode()
                await self._producer.send_and_wait(self.producer_topic, serialized_prediction)
            except Exception as e:
                # Best-effort error channel: log and pass the raw message along.
                logger.error("{} stream processor: Exception: {}".format(self._model.qualified_name, str(e)))
                await self._producer.send_and_wait(self.error_producer_topic, message.value)

    async def stop(self):
        """Stop the streaming processor."""
        logger.info("{} stream processor: Stopping consumer and producer.".format(self._model.qualified_name))
        await self._consumer.stop()
        await self._producer.stop()
|
nilq/baby-python
|
python
|
# -*- coding:utf-8 -*-
from __future__ import print_function
import zipfile
import os
import shutil
import time
import property
import shutil
# Interactive tool: copies already-translated .properties values from an old
# (translated) resource_en.jar into a freshly extracted new resource_en.jar,
# then repacks the result as <path>_new.jar.
# NOTE(review): 'property' is a local/third-party .properties parser module,
# not the Python builtin property.
print('====================================\n\nThis Software Only For Android Studio Language Package Replace\nDevelop By Wellchang\n2019/03/20\n\n====================================\n\n')
print('please input the absolute path of new resource_en.jar file',end=':')
filename = input()
print('please input the absolute path of old resource_en.jar file',end=':')
filename_cn = input()
splitNew = filename.split('.')
splitLen = len(splitNew)
prefix = splitNew[splitLen-1]
prefixWithDot = '.' + splitNew[splitLen-1]
# NOTE(review): str.replace removes EVERY '.jar' occurrence in the path, not
# just the trailing extension — a directory name containing '.jar' breaks this.
path2 = filename.replace(prefixWithDot,'')
path2_cn = filename_cn.replace(prefixWithDot,'')
print('Decompression new resource_en.jar file...',end='',flush=True)
fz = zipfile.ZipFile(filename, 'r')  # NOTE(review): never closed
for file in fz.namelist():
    # print(file)
    fz.extract(file, path2)
print('Done')
print('Decompression old resource_en.jar file...',end='',flush=True)
fzo = zipfile.ZipFile(filename_cn, 'r')  # NOTE(review): never closed
for file in fzo.namelist():
    # print(file)
    fzo.extract(file, path2_cn)
print('Done')
print('translate new resource_en.jar file...',end='',flush=True)
for file in fz.namelist():
    if(file.endswith(".properties")):
        # NOTE(review): '\\' separators are Windows-only.
        props = property.parse(path2 + '\\' + file)
        keys = props.keys
        for fileCN in fzo.namelist():
            if(fileCN == file):
                propsCN = property.parse(path2_cn + '\\' + file)
                keysCN = propsCN.keys
                for key in keys:
                    # print(len(keys))
                    # print(file + "=======>" + key + "=" + props.get(key))
                    if(propsCN.has_key(key)):
                        # Copy the already-translated value over the new one.
                        props.set(key,propsCN.get(key))
                props.save()
                keys.clear()
print('Done')
print('Packing Translated file...',end='',flush=True)
file_new = path2 + "_new.jar"
zNew = zipfile.ZipFile(file_new, 'w', zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(path2): # os.walk traverses the extracted tree
    fpath = dirpath.replace(path2, '') # make the archive path relative; without replace() entries would carry the filesystem root
    fpath = fpath and fpath + os.sep or '' # os.sep is the platform path separator
    for filename in filenames:
        zNew.write(os.path.join(dirpath, filename), fpath+filename)
        # os.path.join() joins path components into a file path.
        # os.path.split(path) splits a path into (directory, file).
print('Done')
zNew.close()
print('Delete the extracted file...',end='',flush=True)
shutil.rmtree(path2)
shutil.rmtree(path2_cn)
print('Done')
print('Translation completed!!!')
|
nilq/baby-python
|
python
|
from collections import Iterable
from iterable_collections.factory import DefaultMethodStrategyFactory
class Collection:
    """Wrapper around an iterable that dispatches unknown attribute lookups
    to a mapping of named method strategies.

    NOTE(review): the module imports Iterable from ``collections``, which was
    removed in Python 3.10 — it should come from ``collections.abc``.
    """

    def __init__(self, iterable, strategies):
        self._iterable = None
        self.iterable = iterable  # validated through the property setter below
        self._strategies = strategies

    def __getattr__(self, item):
        # Only called for attributes not found normally; resolve them as
        # strategy-built methods bound to this collection.
        if item not in self._strategies:
            raise AttributeError('Unknown attribute {}'.format(item))
        return self._strategies[item].make_method(self)

    def __iter__(self):
        return iter(self.iterable)

    def __next__(self):
        # Note: only meaningful when the wrapped iterable is itself an iterator.
        return next(self.iterable)

    def __repr__(self):
        return 'Collection({})'.format(self.iterable)

    @property
    def iterable(self):
        return self._iterable

    @iterable.setter
    def iterable(self, iterable):
        if not isinstance(iterable, Iterable):
            # BUG FIX: the original constructed this ValueError but never
            # raised it, silently accepting non-iterable values.
            raise ValueError('Must be an Iterable type.')
        self._iterable = iterable
def collect(iterable):
    """Create a Collection wired with the default method strategies."""
    return Collection(iterable, DefaultMethodStrategyFactory().create())
|
nilq/baby-python
|
python
|
#!/usr/bin/env python2
import os
import sys
# add the current dir to python path
CURRENT_DIR = os.path.expanduser(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, CURRENT_DIR)
# Self-update the checkout before serving.
# NOTE(review): shell command built by string interpolation; CURRENT_DIR comes
# from __file__ so injection risk is low, but subprocess.run([...], shell=False)
# would be safer — confirm.
os.system('cd %s ;git pull' % CURRENT_DIR)
from app import app
if 'SERVER_SOFTWARE' in os.environ:
    # Running under Sina App Engine: expose a WSGI application object.
    import sae
    application = sae.create_wsgi_app(app)
else:
    # Local development: run Flask's builtin server.
    app.run(host='0.0.0.0')
|
nilq/baby-python
|
python
|
# MIT License
#
# Copyright (c) 2021 Douglas Davis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import functools
import contextlib
import sys
from typing import Iterator, Optional
import pygram11.config
from pygram11._backend import _omp_get_max_threads
def omp_get_max_threads() -> int:
    """Get the number of threads available to OpenMP.

    This returns the result of calling the OpenMP C API function `of
    the same name
    <https://www.openmp.org/spec-html/5.0/openmpsu112.html>`_.

    Returns
    -------
    int
        the maximum number of available threads
    """
    # Thin wrapper over the compiled pygram11 backend binding.
    return _omp_get_max_threads()
def default_omp() -> None:
    """Set OpenMP acceleration thresholds to the default values."""
    # Fixed-width histograms tolerate a higher threshold than variable-width.
    defaults = (
        ("thresholds.fix1d", 10_000),
        ("thresholds.fix1dmw", 10_000),
        ("thresholds.fix2d", 10_000),
        ("thresholds.var1d", 5_000),
        ("thresholds.var1dmw", 5_000),
        ("thresholds.var2d", 5_000),
    )
    for name, value in defaults:
        pygram11.config.set(name, value)
def disable_omp() -> None:
    """Disable OpenMP acceleration by maximizing all thresholds."""
    for key in pygram11.config.threshold_keys():
        pygram11.config.set(key, sys.maxsize)
def force_omp() -> None:
    """Force OpenMP acceleration by nullifying all thresholds."""
    for key in pygram11.config.threshold_keys():
        pygram11.config.set(key, 0)
def without_omp(*args, **kwargs):
    """Wrap a function to disable OpenMP while it's called.

    If a specific key is defined, only that threshold will be modified
    to turn OpenMP off.

    The settings of the pygram11 OpenMP threshold configurations will
    be restored to their previous values at the end of the function
    that is being wrapped.

    Parameters
    ----------
    key : str, optional
        Specific threshold key to turn off.

    Examples
    --------
    Writing a function with this decorator:

    >>> import numpy as np
    >>> from pygram11 import histogram, without_omp
    >>> @without_omp
    ... def single_threaded_histogram():
    ...     data = np.random.standard_normal(size=(1000,))
    ...     return pygram11.histogram(data, bins=10, range=(-5, 5), flow=True)

    Defining a specific `key`:

    >>> import pygram11.config
    >>> previous = pygram11.config.get("thresholds.var1d")
    >>> @without_omp(key="thresholds.var1d")
    ... def single_threaded_histogram2():
    ...     print(f"in function threshold: {pygram11.config.get('thresholds.var1d')}")
    ...     data = np.random.standard_normal(size=(1000,))
    ...     return pygram11.histogram(data, bins=[-2, -1, 1.5, 3.2])
    >>> result = single_threaded_histogram2()
    in function threshold: 9223372036854775807
    >>> previous
    5000
    >>> previous == pygram11.config.get("thresholds.var1d")
    True
    >>> result[0].shape
    (3,)
    """
    # Support both bare use (@without_omp) and parametrized use
    # (@without_omp(key=...)): a single callable positional argument means
    # the bare form, where no key can have been given.
    target = args[0] if len(args) == 1 and callable(args[0]) else None
    key = None if target is not None else kwargs.get("key")

    def wrap(fn):
        @functools.wraps(fn)
        def wrapper(*wargs, **wkwargs):
            with omp_disabled(key=key):
                return fn(*wargs, **wkwargs)
        return wrapper

    return wrap(target) if target else wrap
def with_omp(*args, **kwargs):
    """Wrap a function to always enable OpenMP while it's called.

    If a specific key is defined, only that threshold will be modified
    to turn OpenMP on.

    The settings of the pygram11 OpenMP threshold configurations will
    be restored to their previous values at the end of the function
    that is being wrapped.

    Parameters
    ----------
    key : str, optional
        Specific threshold key to turn on.

    Examples
    --------
    Writing a function with this decorator:

    >>> import numpy as np
    >>> from pygram11 import histogram, with_omp
    >>> @with_omp
    ... def multi_threaded_histogram():
    ...     data = np.random.standard_normal(size=(1000,))
    ...     return pygram11.histogram(data, bins=10, range=(-5, 5), flow=True)

    Defining a specific `key`:

    >>> import pygram11.config
    >>> previous = pygram11.config.get("thresholds.var1d")
    >>> @with_omp(key="thresholds.var1d")
    ... def multi_threaded_histogram2():
    ...     print(f"in function threshold: {pygram11.config.get('thresholds.var1d')}")
    ...     data = np.random.standard_normal(size=(1000,))
    ...     return pygram11.histogram(data, bins=[-2, -1, 1.5, 3.2])
    >>> result = multi_threaded_histogram2()
    in function threshold: 0
    >>> previous
    5000
    >>> previous == pygram11.config.get("thresholds.var1d")
    True
    >>> result[0].shape
    (3,)
    """
    # Same dual decorator protocol as without_omp: bare form passes the
    # function as the single positional argument; parametrized form passes key.
    target = args[0] if len(args) == 1 and callable(args[0]) else None
    key = None if target is not None else kwargs.get("key")

    def wrap(fn):
        @functools.wraps(fn)
        def wrapper(*wargs, **wkwargs):
            with omp_forced(key=key):
                return fn(*wargs, **wkwargs)
        return wrapper

    return wrap(target) if target else wrap
@contextlib.contextmanager
def omp_disabled(*, key: Optional[str] = None) -> Iterator[None]:
    """Context manager to disable OpenMP.

    Parameters
    ----------
    key : str, optional
        Specific threshold key to turn off.

    Examples
    --------
    Using a specific key:

    >>> import pygram11
    >>> import numpy as np
    >>> with pygram11.omp_disabled(key="thresholds.var1d"):
    ...     data = np.random.standard_normal(size=(200,))
    ...     result = pygram11.histogram(data, bins=[-2, -1, 1.5, 3.2])
    >>> result[0].shape
    (3,)

    Disable all thresholds:

    >>> import pygram11
    >>> import numpy as np
    >>> with pygram11.omp_disabled():
    ...     data = np.random.standard_normal(size=(200,))
    ...     result = pygram11.histogram(data, bins=12, range=(-3, 3))
    >>> result[0].shape
    (12,)
    """
    if key is not None:
        # BUG FIX: read the previous value BEFORE entering the try block.
        # In the original, a bad key made config.get raise inside try and the
        # finally then hit a NameError on `prev`, masking the real error.
        prev = pygram11.config.get(key)
        try:
            pygram11.config.set(key, sys.maxsize)
            yield
        finally:
            pygram11.config.set(key, prev)
    else:
        # Snapshot every threshold, disable them all, and restore on exit.
        previous = {k: pygram11.config.get(k) for k in pygram11.config.threshold_keys()}
        try:
            disable_omp()
            yield
        finally:
            for k, v in previous.items():
                pygram11.config.set(k, v)
@contextlib.contextmanager
def omp_forced(*, key: Optional[str] = None) -> Iterator[None]:
    """Context manager to force enable OpenMP.

    Parameters
    ----------
    key : str, optional
        Specific threshold key to turn on.

    Examples
    --------
    Using a specific key:

    >>> import pygram11
    >>> import numpy as np
    >>> with pygram11.omp_forced(key="thresholds.var1d"):
    ...     data = np.random.standard_normal(size=(200,))
    ...     result = pygram11.histogram(data, bins=[-2, -1, 1.5, 3.2])
    >>> result[0].shape
    (3,)

    Enable all thresholds:

    >>> import pygram11
    >>> import numpy as np
    >>> with pygram11.omp_forced():
    ...     data = np.random.standard_normal(size=(200,))
    ...     result = pygram11.histogram(data, bins=10, range=(-3, 3))
    >>> result[0].shape
    (10,)
    """
    if key is not None:
        # BUG FIX: read the previous value BEFORE entering the try block so a
        # failing config.get cannot cause a NameError on `prev` in finally.
        prev = pygram11.config.get(key)
        try:
            pygram11.config.set(key, 0)
            yield
        finally:
            pygram11.config.set(key, prev)
    else:
        # Snapshot every threshold, force them all on, and restore on exit.
        previous = {k: pygram11.config.get(k) for k in pygram11.config.threshold_keys()}
        try:
            force_omp()
            yield
        finally:
            for k, v in previous.items():
                pygram11.config.set(k, v)
|
nilq/baby-python
|
python
|
import numpy as np
import pandas as pd
import joblib
# Train a K-nearest-neighbours model on the cleaned Cleveland heart-disease
# dataset (features = all columns but the last; label = last column) and
# persist it with joblib.
dataset = pd.read_csv("datasets/cleaned_cleveland.csv")
X = dataset.iloc[:, :-1]
y = dataset.iloc[:, -1]
from sklearn.neighbors import KNeighborsClassifier
regressor = KNeighborsClassifier(n_neighbors=21)  # NOTE(review): named "regressor" but this is a classifier
regressor.fit(X, y)
joblib.dump(regressor, "classification/model.pkl")
# Reload immediately to verify the pickled artifact is usable.
classification_model = joblib.load("classification/model.pkl")
# Test model for returning false result
# print(
#     classification_model.predict([[41, 0, 2, 130, 204, 0, 2, 172, 0, 1.4, 1, 0.0, 3.0]])
# )
# Test model for returning true result
# print(
#     classification_model.predict([[67, 1, 4, 120, 229, 0, 2, 129, 1, 2.6, 2, 2.0, 3.0]])
# )
|
nilq/baby-python
|
python
|
from flask import Flask
# 创建flask框架
# 静态文件访问的时候url匹配时,路由规则里的路径名字 默认值是、static
# Static files are served under the (default) '/static' URL prefix.
app = Flask(__name__, static_url_path='/static')
print(app.url_map)  # NOTE(review): debug print at import time — remove for production
# @app.route('/')
# def index():
#     """处理index页面逻辑"""
#     return 'nihao'
@app.route('/login.html')
def login():
    """Serve the login page by reading login.html from the working directory."""
    # Read login.html and return its contents.
    with open('login.html') as f:
        content = f.read()
    return content
num1 = 10  # NOTE(review): unused module-level variable — looks like leftover debugging
if __name__ == '__main__':
    # Run the development server.
    app.run()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from setuptools import setup
import pathlib
here = pathlib.Path(__file__).parent.resolve()
long_description = (here / "README.md").read_text(encoding='utf-8')
# Package metadata for the zarr-swiftstore distribution.
setup(
    name='zarr-swiftstore',
    version="1.2.3",
    description='swift storage backend for zarr',
    long_description=long_description,
    long_description_content_type='text/markdown',
    python_requires=">=3.5",
    package_dir={'': '.'},
    packages=['zarrswift', 'zarrswift.tests'],
    install_requires=[
        'zarr>=2.4.0',
        'python-swiftclient>=3.10.0',
        'mock',  # NOTE(review): test-only dependency — consider extras_require; confirm
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    author='Pavan Siligam',
    author_email='pavan.siligam@gmail.com',
    license='MIT',
    url="https://github.com/siligam/zarr-swiftstore",
)
|
nilq/baby-python
|
python
|
import argparse
from config.config import Config
from dataset.factory import DatasetModule
from domain.metadata import Metadata
from logger import logger
from model.factory import ModelModule
from trainer.factory import TrainerModule
def main(args):
    """Wire up metadata/dataset/model/trainer controllers from the config and run *mode*."""
    mode = args.mode.lower()
    config_file_name = args.config.lower()
    # Get Parameters
    params = Config(file_name=config_file_name).params
    logger.info(f"Parameter information :\n{params}")
    metadata_params = params.metadata
    dataset_params = params.dataset
    model_params = params.model
    trainer_params = params.trainer
    # Metadata Controller
    metadata = Metadata(**metadata_params)
    # Dataset Controller
    dataset_module = DatasetModule(metadata=metadata, **dataset_params)
    # Model Controller
    model_module = ModelModule(metadata=metadata, **model_params)
    # Trainer Controller
    trainer_module = TrainerModule(
        metadata=metadata,
        model_module=model_module,
        dataset_module=dataset_module,
        **trainer_params
    )
    result_dict = trainer_module.do(mode=mode)
    print(result_dict)  # NOTE(review): consider logger.info for consistency — confirm stdout output is intended
if __name__ == '__main__':
    # CLI entry point: parse --mode/--config and hand off to main().
    parser = argparse.ArgumentParser(description="Pytorch Project Template [Byeonggil Jung (Korea Univ, AIR Lab)]")
    parser.add_argument("--mode", required=False, default="train", help="Select the mode, train | inference")
    parser.add_argument("--config", required=True, help="Select the config file")
    args = parser.parse_args()
    logger.info(f"Selected parameters : {args}")
    main(args=args)
|
nilq/baby-python
|
python
|
from qiskit.circuit.library import PauliFeatureMap
class ZZFeatureMap(PauliFeatureMap):
    """Second-order Pauli-Z expansion feature map (Z and ZZ interactions)."""

    def __init__(
        self,
        feature_dimension,
        reps=2,
        entanglement="linear",
        data_map_func=None,
        insert_barriers=False,
        name="ZZFeatureMap",
        parameter_prefix="x",
    ):
        """
        Create a new second-order Pauli-Z expansion.
        @feature_dimension :: Number of features.
        @reps :: The number of repeated circuits, has a min. 1.
        @entanglement :: Specifies the entanglement structure. Refer to
        @data_map_func :: A mapping function for data x.
        @insert_barriers :: If True, barriers are inserted in between the
                            evolution instructions and hadamard layers.
        @name :: Name given to the constructed circuit.
        @parameter_prefix :: Prefix for the data-encoding parameters.

        Raises ValueError when feature_dimension < 2 (ZZ terms need 2 qubits).
        """
        if feature_dimension < 2:
            # BUG FIX: the original implicit string concatenation lacked
            # separating spaces, yielding "...interactionsand ... qubits.You provided...".
            raise ValueError(
                "The ZZFeatureMap contains 2-local interactions "
                "and cannot be defined for less than 2 qubits. "
                f"You provided {feature_dimension}."
            )
        super().__init__(
            feature_dimension=feature_dimension,
            reps=reps,
            entanglement=entanglement,
            paulis=["Z", "ZZ"],
            data_map_func=data_map_func,
            insert_barriers=insert_barriers,
            name=name,
            parameter_prefix=parameter_prefix,
        )
|
nilq/baby-python
|
python
|
#
# @lc app=leetcode.cn id=1 lang=python3
#
# [1] 两数之和
#
# https://leetcode-cn.com/problems/two-sum/description/
#
# algorithms
# Easy (48.55%)
# Likes: 8314
# Dislikes: 0
# Total Accepted: 1.1M
# Total Submissions: 2.2M
# Testcase Example: '[2,7,11,15]\n9'
#
# 给定一个整数数组 nums 和一个目标值 target,请你在该数组中找出和为目标值的那 两个 整数,并返回他们的数组下标。
#
# 你可以假设每种输入只会对应一个答案。但是,数组中同一个元素不能使用两遍。
#
#
#
# 示例:
#
# 给定 nums = [2, 7, 11, 15], target = 9
#
# 因为 nums[0] + nums[1] = 2 + 7 = 9
# 所以返回 [0, 1]
#
#
#
# @lc code=start
class Solution:
    # Annotations are quoted because `typing.List` is never imported in this
    # file (the LeetCode harness injects it); unquoted, the class definition
    # raises NameError when run standalone.
    def twoSum(self, nums: "List[int]", target: int) -> "List[int]":
        """Return indices of the two numbers in *nums* summing to *target*.

        Single pass with a value->index hash map: for each element, check
        whether its complement has already been seen. O(n) time, O(n) space.
        Returns [] when no pair exists.
        """
        seen = {}
        for i, value in enumerate(nums):
            complement = target - value
            if complement in seen:
                return [seen[complement], i]
            seen[value] = i
        return []
# @lc code=end
# def twoSum(self, nums: List[int], target: int) -> List[int]: 两边遍历
# dic = {}
# for i in range(len(nums)):
# dic[nums[i]] = i
# for i in range(len(nums)):
# if target - nums[i] in dic and dic[target-nums[i]] != i:
# return [i,dic[target-nums[i]]]
# return []
|
nilq/baby-python
|
python
|
class DetFace:
    """A detected face: confidence score and bounding box, with a name that
    starts empty and is filled in by later recognition."""

    def __init__(self, conf, bbox):
        self.conf, self.bbox, self.name = conf, bbox, ''
|
nilq/baby-python
|
python
|
from pathlib import Path
import tables
import pandas as pd
class Stream:
    """Read-only view over per-frame pandas tables stored in one HDF5 file."""
    def __init__(self, path):
        # path: HDF5 file containing nodes named /frame_<id>.
        self.path = path
        self.frame_id_list = self._frame_id_list()
        # Dense index (0..N-1) -> actual frame id, in sorted order.
        self.frame_dict = {k:frame_id for k, frame_id in enumerate(self.frame_id_list)}
    def _frame_id_list(self):
        """Return the sorted frame ids found in the file ([] if it doesn't exist)."""
        if not Path(self.path).exists():
            frame_id_list = []
        else:
            with tables.open_file(self.path) as h:
                # Node reprs look like '/frame_<id> ...'; parse out the integer id.
                frame_id_list = [int(str(frame).split(' ')[0].replace('/frame_', ''))
                                 for frame in h.iter_nodes('/')]
            frame_id_list.sort()
        return frame_id_list
    def __len__(self):
        return len(self.frame_id_list)
    def to_pandas(self, frame_id):
        """Load one frame as a DataFrame; *frame_id* is the dense index key of frame_dict."""
        frame_id = self.frame_dict[frame_id]
        return pd.read_hdf(self.path, f'frame_{frame_id}')
    def __getitem__(self, frame_id):
        # NOTE(review): membership is tested against the raw id list, but
        # to_pandas() keys frame_dict by dense index — these only agree when
        # the ids are already 0..N-1; confirm the intended keying.
        if len(self) == 0:
            h = 'null'
        else:
            if frame_id in self.frame_id_list:
                h = self.to_pandas(frame_id)
            elif isinstance(frame_id, slice):
                # Slices address dense indices and return one concatenated frame.
                h = [self.to_pandas(ID)
                     for ID in range(*frame_id.indices(len(self)))]
                h = pd.concat(h)
            else:
                h = 'unreal'
        return h
    @property
    def min_id(self):
        """Smallest raw frame id (0 when empty)."""
        if len(self) == 0:
            return 0
        else:
            return min(self.frame_id_list)
    @property
    def max_id(self):
        # NOTE(review): returns the frame COUNT, not max(self.frame_id_list) —
        # asymmetric with min_id; confirm whether that is intended.
        if len(self) == 0:
            return 0
        else:
            return len(self)
    @property
    def marks(self):
        """Mapping of dense index -> label; labels are currently blank."""
        if len(self) == 0:
            return {0: '0'}
        else:
            return {
                k: '' # frame_id
                for k, frame_id in self.frame_dict.items()
            }
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# encoding: utf-8
"""
@version: v1.0
@author: xiaxianba
@license: Apache Licence
@contact: scuxia@gmail.com
@site: http://weibo.com/xiaxianba
@software: PyCharm
@file: SimTrade.py
@time: 2017/02/06 13:00
@describe: 展期数据
"""
import csv
import datetime as pydt
import numpy as np
import os
from WindPy import *
# 国内三大期货交易所共46个商品合约
list_item = ['CU.SHF', 'AL.SHF', 'ZN.SHF', 'PB.SHF', 'AU.SHF', 'AG.SHF', 'NI.SHF', 'SN.SHF',
'RB.SHF', 'WR.SHF', 'HC.SHF', 'BU.SHF', 'RU.SHF', 'M.DCE', 'Y.DCE',
'A.DCE', 'B.DCE', 'P.DCE', 'C.DCE', 'J.DCE', 'V.DCE', 'I.DCE',
'BB.DCE', 'FB.DCE', 'L.DCE', 'PP.DCE', 'JM.DCE', 'CS.DCE', 'CY.CZC',
'SR.CZC', 'CF.CZC', 'ZC.CZC', 'FG.CZC', 'TA.CZC', 'MA.CZC', 'WH.CZC', 'PM.CZC',
'RI.CZC', 'LR.CZC', 'JR.CZC', 'RS.CZC', 'OI.CZC', 'RM.CZC', 'SF.CZC', 'SM.CZC', ]
# 商品合约对应的Wind板块id
dict_item = {'CU.SHF':'a599010202000000', 'AL.SHF':'a599010203000000', 'ZN.SHF':'a599010204000000',
'PB.SHF':'1000002892000000', 'AU.SHF':'a599010205000000', 'AG.SHF':'1000006502000000',
'NI.SHF':'1000011457000000', 'SN.SHF':'1000011458000000', 'RB.SHF':'a599010206000000',
'WR.SHF':'a599010207000000', 'HC.SHF':'1000011455000000',
'BU.SHF':'1000011013000000', 'RU.SHF':'a599010208000000', 'M.DCE':'a599010304000000',
'Y.DCE':'a599010306000000', 'A.DCE':'a599010302000000', 'B.DCE':'a599010303000000',
'P.DCE':'a599010307000000', 'C.DCE':'a599010305000000', 'J.DCE':'1000002976000000',
'V.DCE':'a599010309000000', 'I.DCE':'1000011439000000', 'BB.DCE':'1000011466000000',
'FB.DCE':'1000011465000000', 'L.DCE':'a599010308000000', 'PP.DCE':'1000011468000000',
'JM.DCE':'1000009338000000', 'CS.DCE':'1000011469000000',
'CY.CZC':'1000011479000000', 'SR.CZC':'a599010405000000', 'CF.CZC':'a599010404000000',
'ZC.CZC':'1000011012000000', 'FG.CZC':'1000008549000000', 'TA.CZC':'a599010407000000',
'MA.CZC':'1000005981000000', 'WH.CZC':'a599010403000000', 'PM.CZC':'1000006567000000',
'RI.CZC':'a599010406000000', 'LR.CZC':'1000011476000000', 'JR.CZC':'1000011474000000',
'RS.CZC':'1000008621000000', 'OI.CZC':'a599010408000000', 'RM.CZC':'1000008622000000',
'SF.CZC':'1000011478000000', 'SM.CZC':'1000011477000000'}
def get_zhanqi(ext_list, datestr):
    """Compute the annualized roll yield for each contract in ext_list on the
    trading day *datestr* (YYYYMMDD) and write one CSV row per contract.

    For each product: fetch the sector constituents from Wind, fetch each
    constituent's volume, take the main contract (trade_hiscode) and the
    second-most-traded contract, and compute
    365 * (log(main close) - log(second close)) / (delivery-date gap in days).

    NOTE: this is Python 2 code — the CSV file is opened in binary mode and
    the script-level code below uses a print statement.
    """
    dict_rate = {}
    prefix = "date="
    suffix = ";sectorid="
    # Output file next to the working directory; Windows path separator.
    file = os.getcwd() + "\\" + datestr + ".csv"
    with open(file, "wb") as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(["secuid", "exchangeid", "updatetime", "actionday", "tradingday", "value"])
        for item in ext_list:
            a = item.split('.')  # a[0]=product code, a[1]=exchange id
            dicts = {}  # contract -> traded volume on datestr
            scope = prefix + datestr + suffix + dict_item[item]
            result = w.wset("sectorconstituent", scope)
            if result.ErrorCode == 0:
                # assumes Data[1] holds the constituent contract codes — TODO confirm
                list_contract = result.Data[1]
                for contract in list_contract:
                    result_volume = w.wsd(contract, "volume", datestr, datestr, "")
                    if result_volume.ErrorCode == 0:
                        dicts[contract] = result_volume.Data[0][0]
                # Main contract code for the product on this date.
                result_main = w.wsd(item, "trade_hiscode", datestr, datestr, "")
                if result_main.ErrorCode == 0 and len(result_main.Data[0]) != 0:
                    main_contract = result_main.Data[0][0]
                    main_contract_price = w.wsd(main_contract, "close", datestr, datestr, "")
                    main_contract_delivery = w.wsd(main_contract, "lastdelivery_date", datestr, datestr, "")
                    # Remove the main contract, then pick the highest-volume remainder.
                    dicts.pop(main_contract)
                    second_contract = sorted(dicts.items(), key=lambda item: item[1], reverse=True)[:1]
                    # NOTE(review): dict(...).keys() is a list in Python 2; under
                    # Python 3 this would pass a dict_keys view to w.wsd — confirm
                    # before porting.
                    second_contract_price = w.wsd(dict(second_contract).keys(), "close", datestr, datestr, "")
                    second_contract_delivery = w.wsd(dict(second_contract).keys(), "lastdelivery_date", datestr, datestr, "")
                    if isinstance(main_contract_price.Data[0][0], float) and isinstance(second_contract_price.Data[0][0], float):
                        # Annualized log-price roll yield between main and second contract.
                        diff_price = np.log(float(main_contract_price.Data[0][0])) - np.log(float(second_contract_price.Data[0][0]))
                        diff_date = (second_contract_delivery.Data[0][0] - main_contract_delivery.Data[0][0]).days
                        dict_rate[item] = 365 * diff_price / diff_date
                        writer.writerow([a[0], a[1], "000000", datestr, datestr, dict_rate[item]])
def gene_file(date):
    """Write an empty roll-yield CSV (header row only) for *date* (YYYYMMDD).

    Used on non-trading days so that a file always exists for the date.
    """
    csv_path = os.getcwd() + "\\" + date + ".csv"
    header = ["secuid", "exchangeid", "updatetime", "actionday", "tradingday", "value"]
    # Binary mode matches the Python 2 csv convention used elsewhere in this file.
    with open(csv_path, "wb") as out:
        csv.writer(out).writerow(header)
if __name__ == "__main__":
    import sys  # explicit import; previously relied on WindPy's star import

    # Default to yesterday's date (data are produced with a one-day lag).
    date_now = pydt.date.today() - pydt.timedelta(days=1)
    datestr = date_now.strftime("%Y%m%d")
    # Optional YYYYMMDD override from the command line.
    if len(sys.argv) > 1:
        # Fixed: Python 2 print statement replaced with the print() function.
        print(sys.argv[1])
        datestr = sys.argv[1]
    w.start()
    date_date = pydt.datetime.strptime(datestr, "%Y%m%d")
    # Fixed: called via the parsed datetime itself; the bare name `date` was
    # never imported (datetime is imported as pydt).
    # Weekdays (Mon=1..Fri=5) get real data; weekends get an empty file.
    if date_date.isoweekday() < 6:
        get_zhanqi(list_item, datestr)
    else:
        gene_file(datestr)
    w.close()
|
nilq/baby-python
|
python
|
import obswebsocket, obswebsocket.requests
import logging
import time
import random
from obs.actions.Action import Action
from obs.actions.ShowSource import ShowSource
from obs.actions.HideSource import HideSource
from obs.Permission import Permission
class Toggle(Action):
    """Swaps two sources: hides the 'toggle_off' source and shows the
    'toggle_on' source, optionally swapping them back after a duration.
    """

    def __init__(self, obs_client, command_name, aliases, description, permission, min_votes, args):
        """Initializes this class, see Action.py"""
        super().__init__(obs_client, command_name, aliases, description, permission, min_votes, args)
        self.log = logging.getLogger(__name__)
        self._init_args(args)

    def execute(self, user):
        """Shows a scene item, such as an image or video, and then hides it after
        a specified duration.

        Returns True on success, False when permissions/votes fail or a
        sub-action fails.
        """
        # Check user permissions and votes
        if not (self._has_permission(user) and self._has_enough_votes(user)):
            self._twitch_failed()
            return False

        # Swap: hide the "toggle_off" source, then show the "toggle_on" source.
        if not self.toggle_off_obj2.execute(user):
            return False
        if not self.toggle_on_obj1.execute(user):
            return False

        # If a duration was specified then sleep and swap both sources back.
        if self.duration is not None:
            time.sleep(self.duration)
            if not self.toggle_on_obj2.execute(user):
                return False
            if not self.toggle_off_obj1.execute(user):
                return False

        self._twitch_done()
        return True

    def _build_action(self, action_cls, name_suffix, source_args):
        """Builds one ShowSource/HideSource helper action, logging and
        re-raising any ValueError from invalid args."""
        try:
            return action_cls(
                self.obs_client,
                self.command_name + name_suffix,
                None,
                "Toggle On for {}".format(self.command_name),
                Permission.EVERYONE,
                0,
                source_args)
        except ValueError as e:
            # BUG FIX: the original logged "ERROR: " + e, which raises
            # TypeError (cannot concatenate str and ValueError). Use lazy
            # %-style formatting instead.
            self.log.error("ERROR: %s", e)
            raise

    def _init_args(self, args):
        """This validates the arguments are valid for this instance,
        and raises a ValueError if they aren't.

        Mandatory args:
            toggle_on (dict): ShowSource/HideSource args for the source shown on execute.
            toggle_off (dict): ShowSource/HideSource args for the source hidden on execute.

        Optional args:
            duration (int): Duration (seconds) before the sources are swapped back.
        """
        self.duration = args.get('duration', None)  # Optional
        self.toggle_on = args.get('toggle_on', None)
        self.toggle_off = args.get('toggle_off', None)

        if self.toggle_on is None or self.toggle_off is None:
            raise ValueError("Command {}: Args error, missing 'toggle_on' or 'toggle_off'".format(self.command_name))

        if self.duration is not None and self.duration < 0:
            raise ValueError("Command {}: Args error, duration must be greater than zero".format(self.command_name))

        self.log.debug("Command {}: Toggle on/off args are {}/{}".format(self.command_name, self.toggle_on, self.toggle_off))

        # Show/Hide pair driving the "toggle_on" source...
        self.toggle_on_obj1 = self._build_action(ShowSource, "_toggle_on1", self.toggle_on)
        self.toggle_off_obj1 = self._build_action(HideSource, "_toggle_off1", self.toggle_on)
        # ...and the pair driving the "toggle_off" source.
        self.toggle_on_obj2 = self._build_action(ShowSource, "_toggle_on2", self.toggle_off)
        self.toggle_off_obj2 = self._build_action(HideSource, "_toggle_off2", self.toggle_off)

        # Disable randomizers to keep it simple for now: collapse any list of
        # sources to its first entry, pair by pair.
        for show_obj, hide_obj in ((self.toggle_on_obj1, self.toggle_off_obj1),
                                   (self.toggle_on_obj2, self.toggle_off_obj2)):
            if isinstance(show_obj.source, list) or isinstance(hide_obj.source, list):
                show_obj.source = show_obj.source[0]
                hide_obj.source = hide_obj.source[0]

        for helper in (self.toggle_on_obj1, self.toggle_off_obj1,
                       self.toggle_on_obj2, self.toggle_off_obj2):
            helper.pick_from_group = False
            # Disable any duration args; duration is controlled by Toggle itself.
            helper.duration = None
|
nilq/baby-python
|
python
|
from collections import namedtuple
from . import meta, pagination, resource_identifier
class ToOneLinks(namedtuple('ToOneLinks', ['maybe_self', 'maybe_related'])):
    """
    Links ('self'/'related', both optional) of a to-one relationship
    anywhere in a response.
    """
    __slots__ = ()

    def __new__(cls, maybe_self=None, maybe_related=None):
        # Defaults make both link members optional.
        return super(ToOneLinks, cls).__new__(cls, maybe_self=maybe_self,
                                              maybe_related=maybe_related)
class ToManyLinks(namedtuple('ToManyLinks', ['pagination', 'maybe_self',
                                             'maybe_related'])):
    """
    Links of a to-many relationship anywhere in a response: mandatory
    pagination plus optional 'self'/'related' links.
    """
    __slots__ = ()

    def __new__(cls, pagination, maybe_self=None, maybe_related=None):
        fields = (pagination, maybe_self, maybe_related)
        return super(ToManyLinks, cls).__new__(cls, *fields)
class ToOne(namedtuple('ToOne', ['maybe_resource_id'])):
    """A to-one relationship; the resource identifier may be absent (None)."""
    __slots__ = ()

    def __new__(cls, maybe_resource_id=None):
        return super(ToOne, cls).__new__(cls,
                                         maybe_resource_id=maybe_resource_id)
class ToMany(namedtuple('ToMany', ['list_resource_ids'])):
    """Representation of a to-many relationship."""
    __slots__ = ()
    def __new__(cls, list_resource_ids):
        return super(ToMany, cls).__new__(cls, list_resource_ids)
class Data(namedtuple('Data', ['either_to_many_or_to_one'])):
    """The "data" section of a relationship: holds a ToMany or a ToOne."""
    __slots__ = ()

    def __new__(cls, either_to_many_or_to_one):
        return super(Data, cls).__new__(
            cls, either_to_many_or_to_one)
class Relationship(namedtuple(
        'Relationship',
        ['name', 'any_data_or_links_or_meta', 'maybe_data',
         'maybe_either_to_one_links_or_to_many_links', 'maybe_meta'])):
    """One named relationship in a relationships lookup; data, links and
    meta are each optional (any_data_or_links_or_meta holds whichever is
    present)."""
    __slots__ = ()

    def __new__(cls, name, any_data_or_links_or_meta, maybe_data=None,
                maybe_either_to_one_links_or_to_many_links=None,
                maybe_meta=None):
        fields = (name, any_data_or_links_or_meta, maybe_data,
                  maybe_either_to_one_links_or_to_many_links, maybe_meta)
        return super(Relationship, cls).__new__(cls, *fields)
class Relationships(namedtuple('Relationships', ['dict_relationships'])):
    """A relationships lookup (name -> Relationship) anywhere in a response."""
    __slots__ = ()

    def __new__(cls, dict_relationships):
        return super(Relationships, cls).__new__(
            cls, dict_relationships)
def mk_single_data(obj, config):
    """Parse the "data" member of a relationship into Data(ToMany|ToOne).

    A list becomes ToMany, a dict becomes ToOne, and a null/empty value
    becomes an empty ToOne; anything else raises RuntimeError.
    """
    if type(obj) is list:
        rids = [resource_identifier.mk(obj_rid, config) for obj_rid in obj]
        return Data(ToMany(rids))
    if type(obj) is dict:
        return Data(ToOne(resource_identifier.mk(obj, config)))
    if not obj:
        return Data(ToOne(None))
    raise RuntimeError(
        "relationships['data'] is unintelligible: {0}".format(str(obj)))
def mk_single_maybe_data(obj, config):
    """Parse obj['data'] when present; otherwise return None."""
    if 'data' not in obj:
        return None
    return mk_single_data(obj['data'], config)
def mk_to_one_links(obj, config):
    """Build ToOneLinks from a links object ('self'/'related' both optional)."""
    return ToOneLinks(maybe_self=obj.get('self', None),
                      maybe_related=obj.get('related', None))
def mk_to_many_links(obj, config):
    """Build ToManyLinks: pagination plus optional 'self'/'related' links."""
    return ToManyLinks(pagination.mk(obj, config),
                       obj.get('self', None),
                       obj.get('related', None))
def mk_single_maybe_links(maybe_data, obj, config):
    """Parse obj['links'] when present; otherwise return None.

    The link shape (to-one vs to-many) is chosen from the already-parsed
    data section *maybe_data*.
    """
    if 'links' not in obj:
        return None
    obj_links = obj['links']
    # BUG FIX: a relationship may carry 'links' without 'data' (maybe_data is
    # None, see mk_single_maybe_data); the original dereferenced
    # maybe_data.either_to_many_or_to_one and crashed with AttributeError.
    # Treat missing data as to-one, matching the ToOne/None branch below.
    if maybe_data is None:
        return mk_to_one_links(obj_links, config)
    if type(maybe_data.either_to_many_or_to_one) in [ToOne, type(None)]:
        return mk_to_one_links(obj_links, config)
    if type(maybe_data.either_to_many_or_to_one) is ToMany:
        return mk_to_many_links(obj_links, config)
    raise RuntimeError('insanity: {0}'.format(str(maybe_data)))
def mk_single_maybe_meta(obj, config):
    """Parse obj['meta'] when present; otherwise return None."""
    return meta.mk(obj['meta'], config) if 'meta' in obj else None
def mk_single(name, obj, config):
    """Build one Relationship from its JSON object.

    data, links and meta may each be absent; any_data_or_links_or_meta
    carries the first one present (or None when all are missing).
    """
    maybe_data = mk_single_maybe_data(obj, config)
    maybe_links = mk_single_maybe_links(maybe_data, obj, config)
    maybe_meta = mk_single_maybe_meta(obj, config)
    first_present = maybe_data or maybe_links or maybe_meta
    return Relationship(name, first_present, maybe_data, maybe_links,
                        maybe_meta)
def mk(obj, config):
    """Build a Relationships lookup from a 'relationships' JSON object.

    Raises RuntimeError when a relationship carries none of data/links/meta.
    """
    parsed = {}
    for name, obj_relationship in obj.items():
        relationship = mk_single(name, obj_relationship, config)
        if not relationship.any_data_or_links_or_meta:
            raise RuntimeError('response must contain data, links, or meta')
        parsed[name] = relationship
    return Relationships(parsed)
|
nilq/baby-python
|
python
|
import random
import networkx as nx
from LightningGraph.LN_parser import read_data_to_xgraph, process_lightning_graph
LIGHTNING_GRAPH_DUMP_PATH = 'LightningGraph/old_dumps/LN_2020.05.13-08.00.01.json'
def sample_long_route(graph, amount, get_route_func, min_route_length=4, max_trials=10000):
    """
    Sample src, dst nodes from graph and use the given function to find a
    route of length at least *min_route_length* between them.

    Retries up to *max_trials* times; raises RuntimeError on failure.
    Returns (route, src, dest).
    """
    # Candidates are all non-isolated nodes of the graph.
    candidates = list(set(graph) - set(nx.isolates(graph)))
    for _ in range(max_trials):
        src = random.choice(candidates)
        dest = random.choice(candidates)
        route = get_route_func(graph, src, dest, amount)
        if len(route) >= min_route_length:
            break
    else:
        raise RuntimeError("Warning: Too hard to find route in graph. Consider changing restrictions or graph")
    return route, src, dest
def create_sub_graph_by_node_capacity(dump_path=LIGHTNING_GRAPH_DUMP_PATH, k=64, highest_capacity_offset=0):
    """
    Creates a sub graph with at most k nodes, selected by total capacity.

    :param dump_path: Path to the JSON lightning graph dump.
    :param k: Maximal number of nodes in the resulting graph.
    :param highest_capacity_offset: Skip this many of the highest-capacity
        nodes before taking k nodes. Used to obtain a less connected graph
        (dropping the very top nodes tends to disconnect the rest).
    :returns: a connected graph with at most k nodes
    """
    graph = read_data_to_xgraph(dump_path)
    process_lightning_graph(graph, remove_isolated=True, total_capacity=True, infer_implementation=True)
    nodes_by_capacity = sorted(graph.nodes,
                               key=lambda node: graph.nodes[node]['total_capacity'],
                               reverse=True)
    # Take k nodes starting after the first `highest_capacity_offset` ones.
    selected = nodes_by_capacity[highest_capacity_offset: k + highest_capacity_offset]
    # subgraph() returns a read-only view; copy() makes it mutable.
    graph = graph.subgraph(selected).copy()
    # Removing isolated nodes again may leave fewer than k nodes.
    process_lightning_graph(graph, remove_isolated=True, total_capacity=True)
    print(f"Creating sub graph with {len(graph.nodes)}/{len(nodes_by_capacity)} nodes and {len(graph.edges)} edges")
    return graph
|
nilq/baby-python
|
python
|
# identifies patients with gout and thiazides
import csv
import statsmodels.api as statsmodels
from atcs import *
from icd import is_gout
# Confusion-matrix counters. NOTE(review): highrisk_prescription_identified
# is printed below but never incremented anywhere in this script — confirm
# whether it should be derived from the counts.
highrisk_prescription_identified = 0
true_positive = 0
true_negative = 0
false_positive = 0
false_negative = 0

# ATC code sets: gout treatments and thiazide-type contraindicated drugs.
gout_treatment = allopurinol | benzbromaron | colchicin | febuxostat | probenecid
gout_contraindicated = xipamid | hydrochlorothiazid | torasemid

# Read the semicolon-separated patient export into a list of row dicts.
# (with-block fixes the previously leaked file handle.)
with open('test_1847_geputzt.csv') as file:
    reader = csv.reader(file, delimiter=';')
    headers = next(reader, None)
    data = [dict(zip(headers, row)) for row in reader]

for row in data:
    # Collect the patient's ATC codes (columns atc_01 .. atc_25).
    atc_codes = set()
    for pos in range(1, 25 + 1):
        row_name = 'atc_%02d' % pos
        if row[row_name]:
            atc_codes.add(row[row_name])
    # Collect the patient's ICD-10 codes (columns icd10_01 .. icd10_20).
    icd_codes = set()
    for pos in range(1, 20 + 1):
        row_name = 'icd10_%02d' % pos
        if row[row_name]:
            icd_codes.add(row[row_name])
    # Prediction = "takes a gout treatment", truth = "has a gout ICD code".
    # The four cases are mutually exclusive, so elif is equivalent to the
    # original four independent ifs.
    has_treatment = bool(gout_treatment & atc_codes)
    has_gout = any(is_gout(icd) for icd in icd_codes)
    if has_treatment and has_gout:
        true_positive += 1
    elif has_treatment:
        false_positive += 1
    elif has_gout:
        false_negative += 1
    else:
        true_negative += 1

# Guard every ratio against an empty denominator. The original used bare
# except (hiding any error) for the first two and left ppv/npv unguarded.
try:
    specificity = true_negative / (true_negative + false_positive)
except ZeroDivisionError:
    specificity = 1
try:
    sensitivity = true_positive / (true_positive + false_negative)
except ZeroDivisionError:
    sensitivity = 1
try:
    ppv = true_positive / (true_positive + false_positive)
except ZeroDivisionError:
    ppv = 1
try:
    npv = true_negative / (true_negative + false_negative)
except ZeroDivisionError:
    npv = 1

print('Specificity:', specificity,
      statsmodels.stats.proportion_confint(true_negative, true_negative + false_positive, alpha=0.05, method='wilson'))
print('Sensitivity:', sensitivity,
      statsmodels.stats.proportion_confint(true_positive, true_positive + false_negative, alpha=0.05, method='wilson'))
print('PPV:', ppv,
      statsmodels.stats.proportion_confint(true_positive, true_positive + false_positive, alpha=0.05, method='wilson'))
print('NPV:', npv,
      statsmodels.stats.proportion_confint(true_negative, true_negative + false_negative, alpha=0.05, method='wilson'))
print('High risk Prescriptions:', highrisk_prescription_identified)
print('True Positives:', true_positive, 'True Negatives:', true_negative, 'False Positives:', false_positive,
      'False Negatives:', false_negative)  # validation: Gout(true) - true_positive = false_negative

precision = ppv
recall = sensitivity
print('Precision:', precision, 'Recall:', recall, 'F1', 2 * precision * recall / (precision + recall))
|
nilq/baby-python
|
python
|
from AoCUtils import *
# Advent-of-Code style solution: sum the "risk levels" of all low points on
# a digit height map read from input.txt.
result = 0
partNumber = "1"
writeToLog = False
if writeToLog:
    logFile = open("log" + partNumber + ".txt", "w")
else:
    logFile = "stdout"
# printLogFactory, Position, MapPosition, product, cast and TextIOWrapper
# presumably come from the AoCUtils star import — TODO confirm.
printLog = printLogFactory(logFile)

# Map each grid coordinate to its digit height.
heights = {}
with open("input.txt", "r") as inputFile:
    lines = inputFile.read().strip().split("\n")
for (y, line) in enumerate(lines):
    line = line.strip()
    for (x, char) in enumerate(line):
        heights[Position(x, y)] = int(char)

# A low point is strictly lower than the minimum of its in-frame neighbours;
# its risk level is height + 1.
# NOTE(review): heights is keyed by Position but looked up with a
# MapPosition — this relies on the two types hashing/comparing equal; verify
# in AoCUtils.
for (x, y) in product(range(len(lines[0])), range(len(lines))):
    p = MapPosition(x, y, frame=lines)
    m = min([heights[q] for q in p.adjacent()])
    if heights[p] < m:
        result += heights[p] + 1

with open("output" + partNumber + ".txt", "w") as outputFile:
    outputFile.write(str(result))
print(str(result))
if writeToLog:
    cast(TextIOWrapper, logFile).close()
|
nilq/baby-python
|
python
|
import setuptools
__version__ = "0.2.0"
__author__ = "Ricardo Montañana Gómez"
def readme():
    """Return the contents of README.md (used as the PyPI long description)."""
    with open("README.md") as readme_file:
        return readme_file.read()
# Package metadata for the Odte oblique decision tree ensemble.
setuptools.setup(
    name="Odte",
    version=__version__,
    license="MIT License",
    description="Oblique decision tree Ensemble",
    long_description=readme(),
    long_description_content_type="text/markdown",
    packages=setuptools.find_packages(),
    # NOTE(review): URL points at the stree repository rather than an Odte
    # repository — confirm this is intentional.
    url="https://github.com/doctorado-ml/stree",
    author=__author__,
    author_email="ricardo.montanana@alu.uclm.es",
    keywords="scikit-learn oblique-classifier oblique-decision-tree decision-\
tree ensemble svm svc",
    classifiers=[
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.8",
        "Natural Language :: English",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Intended Audience :: Science/Research",
    ],
    install_requires=["scikit-learn", "numpy", "ipympl", "stree"],
    test_suite="odte.tests",
    zip_safe=False,
)
|
nilq/baby-python
|
python
|
from setuptools import setup, find_packages
# Read the long description from the README shipped alongside setup.py.
with open("README.md", "r") as fh:
    long_description = fh.read()

setup(
    name='ddp_asyncio',
    version='0.3.0',
    description='Asynchronous DDP library',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/hunternet93/ddp_asyncio',
    # NOTE(review): download_url still points at the 0.2.0 release although
    # version is 0.3.0 — confirm and update if unintended.
    download_url='https://github.com/hunternet93/ddp_asyncio/releases/download/0.2.0/ddp_asyncio-0.2.0.tar.gz',
    author='Isaac Smith',
    author_email='isaac@isrv.pw',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Framework :: AsyncIO'
    ],
    keywords='ddp meteor',
    packages=find_packages(),
    install_requires=['websockets', 'ejson'],
)
|
nilq/baby-python
|
python
|
import random
import pandas as pd
def synthetic(n, categorical=[], continuous=[]):
    """Synthetic dataset.

    For each element in ``categorical``, either 0 or 1 is generated randomly.
    For each element in ``continuous``, a random integer between 45 and 90
    (inclusive) is generated.

    Parameters
    ----------
    n: int
        Number of people (rows).
    categorical: iterable(str), optional
        Categorical properties, e.g. gender, country, etc. Values are
        either 0 or 1. Defaults to [].
    continuous: iterable(str), optional
        Continuous properties, e.g. age, average_mark, etc. Values are
        integers between 45 and 90. Defaults to [].

    Returns
    -------
    pd.DataFrame
        Synthetic dataset with a 'name' column plus the requested columns.
    """
    columns = {'name': [f'person-{i}' for i in range(n)]}
    for cat_name in categorical:
        columns[cat_name] = [random.randint(0, 1) for _ in range(n)]
    for cont_name in continuous:
        columns[cont_name] = [random.randint(45, 90) for _ in range(n)]
    return pd.DataFrame(columns)
|
nilq/baby-python
|
python
|
# Copyright Aleksey Gurtovoy 2001-2004
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
# See http://www.boost.org/libs/mpl for documentation.
# $Source: /CVSROOT/boost/libs/mpl/preprocessed/preprocess_set.py,v $
# $Date: 2007/10/29 07:32:56 $
# $Revision: 1.1.1.1 $
import preprocess
# Generate the preprocessed "plain" variant of Boost.MPL's set headers,
# writing them under boost\mpl\set\aux_\preprocessed (Windows separators).
preprocess.main(
    [ "plain" ]
    , "set"
    , "boost\\mpl\\set\\aux_\\preprocessed"
    )
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from Bio import SeqIO
from Bio.SeqUtils import GC
import click
import math
import random
import sys
CONTEXT_SETTINGS = {
"help_option_names": ["-h", "--help"],
}
@click.command(no_args_is_help=True, context_settings=CONTEXT_SETTINGS)
@click.argument(
    "fasta_file",
    type=click.Path(exists=True, resolve_path=True),
)
@click.option(
    "-f", "--filter-masked",
    help="Filter masked DNA sequences.",
    is_flag=True,
)
@click.option(
    "-s", "--subsample",
    help="Number of sequences to subsample.",
    type=int,
    default=1000,
    show_default=True,
)
@click.option(
    "-o", "--output-file",
    help="Output file. [default: STDOUT]",
    type=click.Path(writable=True, readable=False, resolve_path=True,
                    allow_dash=True),
)
def main(**args):
    """Subsample sequences from a FASTA file, preserving its %GC distribution."""
    # Group sequences by rounded %GC content
    gc_groups = {}
    for record in SeqIO.parse(args["fasta_file"], "fasta"):
        if args["filter_masked"]:
            # Skip sequences containing hard-masked (N/n) bases.
            if record.seq.count("N") or record.seq.count("n"):
                continue
        gc = round(GC(record.seq))
        gc_groups.setdefault(gc, [])
        gc_groups[gc].append(record)
    # Stratified subsampling: take a proportional share from each %GC bin,
    # shuffling deterministically (fixed seed) for reproducibility.
    sampled = []
    random_seed = 123
    norm_factor = args["subsample"] / \
        sum([len(v) for v in gc_groups.values()])
    for i in sorted(gc_groups):
        random.Random(random_seed).shuffle(gc_groups[i])
        sampled.extend(gc_groups[i][:math.ceil(len(gc_groups[i])*norm_factor)])
    random.Random(random_seed).shuffle(sampled)
    # Write. BUG FIX: the original also called handle.close() when writing to
    # sys.stdout, closing the process's stdout; now only a real output file
    # is opened and closed.
    if args["output_file"] is not None:
        with open(args["output_file"], "wt") as handle:
            SeqIO.write(sampled[:args["subsample"]], handle, "fasta")
    else:
        SeqIO.write(sampled[:args["subsample"]], sys.stdout, "fasta")

if __name__ == "__main__":
    main()
|
nilq/baby-python
|
python
|
# Copyright 2021 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
# This software is distributed under the 3-clause BSD License.
# Code that is producing a xhat and a confidence interval using sequential sampling
# This is the implementation of the 2 following papers:
# [bm2011] Bayraksan, G., Morton,D.P.: A Sequential Sampling Procedure for Stochastic Programming. Operations Research 59(4), 898-913 (2011)
# [bpl2012] Bayraksan, G., Pierre-Louis, P.: Fixed-Width Sequential Stopping Rules for a Class of Stochastic Programs, SIAM Journal on Optimization 22(4), 1518-1548 (2012)
# see also multi_seqsampling.py, which has a class derived from this class
import pyomo.environ as pyo
import mpi4py.MPI as mpi
import mpisppy.utils.sputils as sputils
import numpy as np
import scipy.stats
import importlib
from mpisppy import global_toc
fullcomm = mpi.COMM_WORLD
global_rank = fullcomm.Get_rank()
import mpisppy.utils.amalgamator as amalgamator
import mpisppy.utils.xhat_eval as xhat_eval
import mpisppy.confidence_intervals.ciutils as ciutils
from mpisppy.tests.examples.apl1p import xhat_generator_apl1p
#==========
def is_needed(options, needed_things, message=""):
    """Raise RuntimeError when any key in *needed_things* is absent from *options*.

    Parameters
    ----------
    options: dict
        Option dictionary to validate.
    needed_things: iterable
        Keys that must all be present in options.
    message: str, optional
        Extra text appended to the error message.
    """
    if not set(needed_things) <= set(options):
        # Typo fix in the error message: "reqiored" -> "required".
        raise RuntimeError("Some options are missing from this list of required options:\n"
                           f"{needed_things}\n"
                           f"{message}")
def add_options(options, optional_things, optional_default_settings):
    """Fill *options* in place with Bayraksan-et-al defaults for any
    optional key that the caller did not supply."""
    for key, default in zip(optional_things, optional_default_settings):
        if key not in options:
            options[key] = default
def xhat_generator_farmer(scenario_names, solvername="gurobi", solver_options=None, crops_multiplier=1):
    '''Developer-testing helper: build the farmer EF over the given scenarios
    and return the first-stage solution minimizing the approximate problem.

    Parameters
    ----------
    scenario_names: list of str
        Names of the scenarios to use.
    solvername: str, optional
        Name of the solver used. The default is "gurobi".
    solver_options: dict, optional
        Solving options. The default is None.
    crops_multiplier: int, optional
        A parameter of the farmer model. The default is 1.

    Returns
    -------
    xhat: xhat object (dict containing a 'ROOT' key with a np.array)
        A generated xhat.

    NOTE: this is here for testing during development.
    '''
    scenario_count = len(scenario_names)
    amalgamator_options = {
        "EF-2stage": True,
        "EF_solver_name": solvername,
        "EF_solver_options": solver_options,
        "use_integer": False,
        "crops_multiplier": crops_multiplier,
        "num_scens": scenario_count,
        "_mpisppy_probability": 1 / scenario_count,
    }
    # from_module builds an Amalgamator without command-line parsing.
    ama = amalgamator.from_module("mpisppy.tests.examples.farmer",
                                  amalgamator_options, use_command_line=False)
    # Override the scenario list with exactly the requested scenarios.
    ama.scenario_names = scenario_names
    ama.run()
    # Extract xhat from the solved extensive form.
    return sputils.nonant_cache_from_ef(ama.ef)
class SeqSampling():
"""
Computing a solution xhat and a confidence interval for the optimality gap sequentially,
by taking an increasing number of scenarios.
Args:
refmodel (str): path of the model we use (e.g. farmer, uc)
xhat_generator (function): a function that takes scenario_names (and
and optional solvername and solver_options)
as input and returns a first stage policy
xhat.
options (dict): multiple parameters, e.g.:
- "solvername", str, the name of the solver we use
- "solver_options", dict containing solver options
(default is {}, an empty dict)
- "sample_size_ratio", float, the ratio (xhat sample size)/(gap estimators sample size)
(default is 1)
- "xhat_gen_options" dict containing options passed to the xhat generator
(default is {}, an empty dict)
- "ArRP", int, how many estimators should be pooled to compute G and s ?
(default is 1, no pooling)
- "kf_Gs", int, resampling frequency to compute estimators
(default is 1, always resample completely)
- "kf_xhat", int, resampling frequency to compute xhat
(default is 1, always resample completely)
-"confidence_level", float, asymptotic confidence level
of the output confidence interval
(default is 0.95)
-Some other parameters, depending on what model
(BM or BPL, deterministic or sequential sampling)
stochastic_sampling (bool, default False): should we compute sample sizes using estimators ?
if stochastic_sampling is True, we compute sample size using §5 of [Bayraksan and Pierre-Louis]
else, we compute them using [Bayraksan and Morton] technique
stopping_criterion (str, default 'BM'): which stopping criterion should be used ?
2 criterions are supported : 'BM' for [Bayraksan and Morton] and 'BPL' for [Bayraksan and Pierre-Louis]
solving_type (str, default 'EF-2stage'): how do we solve the approximate problems ?
Must be one of 'EF-2stage' and 'EF-mstage' (for problems with more than 2 stages).
Solving methods outside EF are not supported yet.
"""
def __init__(self,
refmodel,
xhat_generator,
options,
stochastic_sampling = False,
stopping_criterion = "BM",
solving_type = "None"):
self.refmodel = importlib.import_module(refmodel)
self.refmodelname = refmodel
self.xhat_generator = xhat_generator
self.options = options
self.stochastic_sampling = stochastic_sampling
self.stopping_criterion = stopping_criterion
self.solving_type = solving_type
self.solvername = options.get("solvername", None)
self.solver_options = options["solver_options"] if "solver_options" in options else None
self.sample_size_ratio = options["sample_size_ratio"] if "sample_size_ration" in options else 1
self.xhat_gen_options = options["xhat_gen_options"] if "xhat_gen_options" in options else {}
#Check if refmodel has all needed attributes
everything = ["scenario_names_creator",
"scenario_creator",
"kw_creator"] # denouement can be missing.
you_can_have_it_all = True
for ething in everything:
if not hasattr(self.refmodel, ething):
print(f"Module {refmodel} is missing {ething}")
you_can_have_it_all = False
if not you_can_have_it_all:
raise RuntimeError(f"Module {refmodel} not complete for seqsampling")
#Manage options
optional_options = ["ArRP","kf_Gs","kf_xhat","confidence_level"]
optional_default_settings = [1,1,1,0.95]
add_options(options, optional_options, optional_default_settings)
if self.stochastic_sampling :
add_options(options, ["n0min"], [50])
if self.stopping_criterion == "BM":
needed_things = ["epsprime","hprime","eps","h","p"]
is_needed(options, needed_things)
optional_things = ["q"]
optional_default_settings = [None]
add_options(options, optional_things, optional_default_settings)
elif self.stopping_criterion == "BPL":
is_needed(options, ["eps"])
if not self.stochastic_sampling :
optional_things = ["c0","c1","growth_function"]
optional_default_settings = [50,2,(lambda x : x-1)]
add_options(options, optional_things, optional_default_settings)
else:
raise RuntimeError("Only BM and BPL criteria are supported at this time.")
for oname in options:
setattr(self, oname, options[oname]) #Set every option as an attribute
#Check the solving_type, and find if the problem is multistage
two_stage_types = ['EF-2stage']
multistage_types = ['EF-mstage']
if self.solving_type in two_stage_types:
self.multistage = False
elif self.solving_type in multistage_types:
self.multistage = True
else:
raise RuntimeError(f"The solving_type {self.solving_type} is not supported."
f"If you want to run a 2-stage problem, please use a solving_type in {two_stage_types}"
f"If you want to run a multistage stage problem, please use a solving_type in {multistage_types}")
#Check the multistage options
if self.multistage:
needed_things = ["branching_factors"]
is_needed(options, needed_things)
if options['kf_Gs'] != 1 or options['kf_xhat'] != 1:
raise RuntimeError("Resampling frequencies must be set equal to one for multistage.")
#Get the stopping criterion
if self.stopping_criterion == "BM":
self.stop_criterion = self.bm_stopping_criterion
elif self.stopping_criterion == "BPL":
self.stop_criterion = self.bpl_stopping_criterion
else:
raise RuntimeError("Only BM and BPL criteria are supported.")
#Get the function computing sample size
if self.stochastic_sampling:
self.sample_size = self.stochastic_sampsize
elif self.stopping_criterion == "BM":
self.sample_size = self.bm_sampsize
elif self.stopping_criterion == "BPL":
self.sample_size = self.bpl_fsp_sampsize
else:
raise RuntimeError("Only BM and BPL sample sizes are supported yet")
#To be sure to always use new scenarios, we set a ScenCount that is
#telling us how many scenarios has been used so far
self.ScenCount = 0
#If we are running a multistage problem, we also need a seed count
self.SeedCount = 0
def bm_stopping_criterion(self,G,s,nk):
# arguments defined in [bm2011]
return(G>self.hprime*s+self.epsprime)
def bpl_stopping_criterion(self,G,s,nk):
# arguments defined in [bpl2012]
t = scipy.stats.t.ppf(self.confidence_level,nk-1)
sample_error = t*s/np.sqrt(nk)
inflation_factor = 1/np.sqrt(nk)
return(G+sample_error+inflation_factor>self.eps)
def bm_sampsize(self,k,G,s,nk_m1, r=2):
# arguments defined in [bm2011]
h = self.h
hprime = self.hprime
p = self.p
q = self.q
confidence_level = self.confidence_level
if q is None :
# Computing n_k as in (5) of [Bayraksan and Morton, 2009]
if hasattr(self, "c") :
c = self.c
else:
if confidence_level is None :
raise RuntimeError("We need the confidence level to compute the constant cp")
j = np.arange(1,1000)
s = sum(np.power(j,-p*np.log(j)))
c = max(1,2*np.log(s/(np.sqrt(2*np.pi)*(1-confidence_level))))
lower_bound = (c+2*p* np.log(k)**2)/((h-hprime)**2)
else :
# Computing n_k as in (14) of [Bayraksan and Morton, 2009]
if hasattr(self, "c") :
c = self.c
else:
if confidence_level is None :
RuntimeError("We need the confidence level to compute the constant c_pq")
j = np.arange(1,1000)
s = sum(np.exp(-p*np.power(j,2*q/r)))
c = max(1,2*np.log(s/(np.sqrt(2*np.pi)*(1-confidence_level))))
lower_bound = (c+2*p*np.power(k,2*q/r))/((h-hprime)**2)
#print(f"nk={lower_bound}")
return int(np.ceil(lower_bound))
def bpl_fsp_sampsize(self,k,G,s,nk_m1):
# arguments defined in [bpl2012]
return(int(np.ceil(self.c0+self.c1*self.growth_function(k))))
def stochastic_sampsize(self,k,G,s,nk_m1):
# arguments defined in [bpl2012]
if (k==1):
#Initialization
return(int(np.ceil(max(self.n0min,np.log(1/self.eps)))))
#§5 of [Bayraksan and Pierre-Louis] : solving a 2nd degree equation in sqrt(n)
t = scipy.stats.t.ppf(self.confidence_level,nk_m1-1)
a = - self.eps
b = 1+t*s
c = nk_m1*G
maxroot = -(np.sqrt(b**2-4*a*c)+b)/(2*a)
print(f"s={s}, t={t}, G={G}")
print(f"a={a}, b={b},c={c},delta={b**2-4*a*c}")
print(f"At iteration {k}, we took n_k={int(np.ceil((maxroot**2)))}")
return(int(np.ceil(maxroot**2)))
def run(self, maxit=200):
    """ Execute a sequental sampling algorithm

    Iterates: compute a candidate solution xhat_k from m_k = mult*n_k
    sampled scenarios, estimate its optimality gap (G_k, s_k) from n_k
    scenarios, and grow the sample size until the configured stopping
    criterion is met (or maxit iterations elapse).

    Args:
        maxit (int): override the stopping criteria based on iterations
    Returns:
        {"T":T,"Candidate_solution":final_xhat,"CI":CI,}
    Raises:
        RuntimeWarning: immediately, if multistage (dependent samples).
        RuntimeError: if no acceptable solution is found within maxit iterations.
    """
    if self.multistage:
        # NOTE(review): raising a Warning subclass aborts the run rather than
        # warning -- presumably intentional to force IndepScens_SeqSampling.
        raise RuntimeWarning("Multistage sequential sampling can be done "
                             "using the SeqSampling, but dependent samples\n"
                             "will be used. The class IndepScens_SeqSampling uses independent samples and therefor has better theoretical support.")
    refmodel = self.refmodel
    mult = self.sample_size_ratio  # used to set m_k= mult*n_k
    #----------------------------Step 0 -------------------------------------#
    #Initialization
    k = 1
    #Computing the lower bound for n_1
    if self.stopping_criterion == "BM":
        #Finding a constant used to compute nk
        r = 2  #TODO : we could add flexibility here
        j = np.arange(1, 1000)
        if self.q is None:
            s = sum(np.power(j, -self.p * np.log(j)))
        else:
            if self.q < 1:
                raise RuntimeError("Parameter q should be greater than 1.")
            s = sum(np.exp(-self.p * np.power(j, 2 * self.q / r)))
        # Cache the constant so the sample-size rule does not recompute it.
        self.c = max(1, 2 * np.log(s / (np.sqrt(2 * np.pi) * (1 - self.confidence_level))))
    lower_bound_k = self.sample_size(k, None, None, None)
    #Computing xhat_1.
    #We use sample_size_ratio*n_k observations to compute xhat_k
    if self.multistage:
        xhat_branching_factors = ciutils.scalable_branching_factors(mult * lower_bound_k, self.options['branching_factors'])
        mk = np.prod(xhat_branching_factors)
        self.xhat_gen_options['start_seed'] = self.SeedCount  #TODO: Maybe find a better way to manage seed
        xhat_scenario_names = refmodel.scenario_names_creator(mk)
    else:
        mk = int(np.floor(mult * lower_bound_k))
        xhat_scenario_names = refmodel.scenario_names_creator(mk, start=self.ScenCount)
        self.ScenCount += mk
    # Strip options that are passed explicitly below so they are not duplicated.
    xgo = self.xhat_gen_options.copy()
    xgo.pop("solvername", None)  # it will be given explicitly
    xgo.pop("solver_options", None)  # it will be given explicitly
    xgo.pop("scenario_names", None)  # given explicitly
    xhat_k = self.xhat_generator(xhat_scenario_names,
                                 solvername=self.solvername,
                                 solver_options=self.solver_options,
                                 **xgo)
    #----------------------------Step 1 -------------------------------------#
    #Computing n_1 and associated scenario names
    if self.multistage:
        self.SeedCount += sputils.number_of_nodes(xhat_branching_factors)
        gap_branching_factors = ciutils.scalable_branching_factors(lower_bound_k, self.options['branching_factors'])
        nk = np.prod(gap_branching_factors)
        estimator_scenario_names = refmodel.scenario_names_creator(nk)
        sample_options = {'branching_factors': gap_branching_factors, 'seed': self.SeedCount}
    else:
        # Round n_k up to a multiple of ArRP (number of replications).
        nk = self.ArRP * int(np.ceil(lower_bound_k / self.ArRP))
        estimator_scenario_names = refmodel.scenario_names_creator(nk,
                                                                   start=self.ScenCount)
        sample_options = None
        self.ScenCount += nk
    #Computing G_nkand s_k associated with xhat_1
    self.options['num_scens'] = nk
    scenario_creator_kwargs = self.refmodel.kw_creator(self.options)
    scenario_denouement = refmodel.scenario_denouement if hasattr(refmodel, "scenario_denouement") else None
    estim = ciutils.gap_estimators(xhat_k, self.refmodelname,
                                   solving_type=self.solving_type,
                                   scenario_names=estimator_scenario_names,
                                   sample_options=sample_options,
                                   ArRP=self.ArRP,
                                   scenario_creator_kwargs=scenario_creator_kwargs,
                                   scenario_denouement=scenario_denouement,
                                   solvername=self.solvername,
                                   solver_options=self.solver_options)
    Gk, sk = estim['G'], estim['s']
    if self.multistage:
        self.SeedCount = estim['seed']
    #----------------------------Step 2 -------------------------------------#
    # Loop while the stopping criterion says "keep sampling".
    while (self.stop_criterion(Gk, sk, nk) and k < maxit):
        #----------------------------Step 3 -------------------------------------#
        k += 1
        nk_m1 = nk  #n_{k-1}
        mk_m1 = mk
        lower_bound_k = self.sample_size(k, Gk, sk, nk_m1)
        #Computing m_k and associated scenario names
        if self.multistage:
            xhat_branching_factors = ciutils.scalable_branching_factors(mult * lower_bound_k, self.options['branching_factors'])
            mk = np.prod(xhat_branching_factors)
            self.xhat_gen_options['start_seed'] = self.SeedCount  #TODO: Maybe find a better way to manage seed
            xhat_scenario_names = refmodel.scenario_names_creator(mk)
        else:
            mk = int(np.floor(mult * lower_bound_k))
            assert mk >= mk_m1, "Our sample size should be increasing"
            # Every kf_xhat iterations resample from scratch; otherwise extend.
            if (k % self.kf_xhat == 0):
                #We use only new scenarios to compute xhat
                xhat_scenario_names = refmodel.scenario_names_creator(int(mult * nk),
                                                                      start=self.ScenCount)
                self.ScenCount += mk
            else:
                #We reuse the previous scenarios
                xhat_scenario_names += refmodel.scenario_names_creator(mult * (nk - nk_m1),
                                                                       start=self.ScenCount)
                self.ScenCount += mk - mk_m1
        #Computing xhat_k
        xgo = self.xhat_gen_options.copy()
        xgo.pop("solvername", None)  # it will be given explicitly
        xgo.pop("solver_options", None)  # it will be given explicitly
        xgo.pop("scenario_names", None)  # given explicitly
        xhat_k = self.xhat_generator(xhat_scenario_names,
                                     solvername=self.solvername,
                                     solver_options=self.solver_options,
                                     **xgo)
        #Computing n_k and associated scenario names
        if self.multistage:
            self.SeedCount += sputils.number_of_nodes(xhat_branching_factors)
            gap_branching_factors = ciutils.scalable_branching_factors(lower_bound_k, self.options['branching_factors'])
            nk = np.prod(gap_branching_factors)
            estimator_scenario_names = refmodel.scenario_names_creator(nk)
            sample_options = {'branching_factors': gap_branching_factors, 'seed': self.SeedCount}
        else:
            nk = self.ArRP * int(np.ceil(lower_bound_k / self.ArRP))
            assert nk >= nk_m1, "Our sample size should be increasing"
            # Every kf_Gs iterations resample the gap-estimation scenarios.
            if (k % self.kf_Gs == 0):
                #We use only new scenarios to compute gap estimators
                estimator_scenario_names = refmodel.scenario_names_creator(nk,
                                                                           start=self.ScenCount)
                self.ScenCount += nk
            else:
                #We reuse the previous scenarios
                estimator_scenario_names += refmodel.scenario_names_creator((nk - nk_m1),
                                                                            start=self.ScenCount)
                self.ScenCount += (nk - nk_m1)
            sample_options = None
        #Computing G_k and s_k
        self.options['num_scens'] = nk
        scenario_creator_kwargs = self.refmodel.kw_creator(self.options)
        estim = ciutils.gap_estimators(xhat_k, self.refmodelname,
                                       solving_type=self.solving_type,
                                       scenario_names=estimator_scenario_names,
                                       sample_options=sample_options,
                                       ArRP=self.ArRP,
                                       scenario_creator_kwargs=scenario_creator_kwargs,
                                       scenario_denouement=scenario_denouement,
                                       solvername=self.solvername,
                                       solver_options=self.solver_options)
        if self.multistage:
            self.SeedCount = estim['seed']
        Gk, sk = estim['G'], estim['s']
        # Periodic progress report (rank 0 only).
        if (k % 10 == 0) and global_rank == 0:
            print(f"k={k}")
            print(f"n_k={nk}")
            print(f"G_k={Gk}")
            print(f"s_k={sk}")
    #----------------------------Step 4 -------------------------------------#
    if (k == maxit):
        raise RuntimeError(f"The loop terminated after {maxit} iteration with no acceptable solution")
    T = k
    final_xhat = xhat_k
    # Width of the one-sided confidence interval on the optimality gap.
    if self.stopping_criterion == "BM":
        upper_bound = self.h * sk + self.eps
    elif self.stopping_criterion == "BPL":
        upper_bound = self.eps
    else:
        raise RuntimeError("Only BM and BPL criterion are supported yet.")
    CI = [0, upper_bound]
    global_toc(f"G={Gk} sk={sk}; xhat has been computed with {nk*mult} observations.")
    return {"T": T, "Candidate_solution": final_xhat, "CI": CI, }
if __name__ == "__main__":
    # for developer testing
    solvername = "cplex"
    refmodel = "mpisppy.tests.examples.farmer"
    farmer_opt_dict = {"crops_multiplier": 3}
    # create three options dictionaries, then use one of them
    # relative width
    optionsBM = {'h': 0.2,
                 'hprime': 0.015,
                 'eps': 0.5,
                 'epsprime': 0.4,
                 "p": 0.2,
                 "q": 1.2,
                 "solvername": solvername,
                 "stopping": "BM"  # TBD use this and drop stopping_criterion from the constructor
                 }
    # fixed width, fully sequential
    optionsFSP = {'eps': 50.0,
                  'solvername': solvername,
                  "c0": 50,  # starting sample size
                  "xhat_gen_options": farmer_opt_dict,
                  "crops_multiplier": 3,  # option for the farmer problem
                  "ArRP": 2,  # this must be 1 for any multi-stage problems
                  "stopping": "BPL"
                  }
    # fixed width sequential with stochastic samples
    optionsSSP = {'eps': 1.0,
                  'solvername': solvername,
                  "n0min": 200,  # only for stochastic sampling
                  "stopping": "BPL",
                  #"xhat_gen_options": farmer_opt_dict,
                  #"crops_multiplier": 3,
                  }
    # change the options argument and stopping criterion
    # (currently exercises the fully-sequential BPL configuration)
    our_pb = SeqSampling(refmodel,
                         xhat_generator_farmer,
                         optionsFSP,
                         stochastic_sampling=False,  # maybe this should move to the options dict?
                         stopping_criterion="BPL",
                         )
    res = our_pb.run()
    print(res)
|
nilq/baby-python
|
python
|
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.python.tasks.isort_run import IsortRun
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_daemon
class IsortRunIntegrationTest(PantsRunIntegrationTest):
    """Integration checks for the `fmt.isort` task."""

    @ensure_daemon
    def test_isort_no_python_sources_should_noop(self):
        # The targets contain only Java sources, so isort must no-op
        # and say so on stderr.
        args = [
            '-ldebug',
            'fmt.isort',
            'testprojects/tests/java/org/pantsbuild/testproject/dummies/::',
            '--',
            '--check-only',
        ]
        run_result = self.run_pants(command=args)
        self.assert_success(run_result)
        self.assertIn(IsortRun.NOOP_MSG_HAS_TARGET_BUT_NO_SOURCE, run_result.stderr_data)
|
nilq/baby-python
|
python
|
import cv2
import random
import numpy as np
from utils.bbox_utils import iou, object_coverage
from utils.textboxes_utils import get_bboxes_from_quads
def random_crop_quad(
    image,
    quads,
    classes,
    min_size=0.1,
    max_size=1,
    min_ar=1,
    max_ar=2,
    overlap_modes=[
        None,
        [0.1, None],
        [0.3, None],
        [0.7, None],
        [0.9, None],
        [None, None],
    ],
    max_attempts=100,
    p=0.5
):
    """ Randomly crops a patch from the image.
    Args:
        - image: numpy array representing the input image.
        - quads: numpy array representing the quads.
        - classes: the list of classes associating with each quads.
        - min_size: the minimum size a crop can be (fraction of the image).
        - max_size: the maximum size a crop can be (fraction of the image).
        - min_ar: the minimum aspect ratio a crop can be.
        - max_ar: the maximum aspect ratio a crop can be.
        - overlap_modes: the list of overlapping modes the function can randomly choose from.
        - max_attempts: the max number of attempts to generate a patch.
        - p: probability of applying the crop (currently disabled, see NOTE below).
    Returns:
        - image: the modified image
        - quads: the modified quads
        - classes: the modified classes
    """
    assert p >= 0, "p must be larger than or equal to zero"
    assert p <= 1, "p must be less than or equal to 1"
    assert min_size > 0, "min_size must be larger than zero."
    assert max_size <= 1, "max_size must be less than or equals to one."
    assert max_size > min_size, "max_size must be larger than min_size."
    assert max_ar > min_ar, "max_ar must be larger than min_ar."
    assert max_attempts > 0, "max_attempts must be larger than zero."
    # NOTE(review): the probability gate on `p` and the random choice among
    # `overlap_modes` are disabled (hard-coded below) -- presumably leftover
    # debug state; confirm before relying on this augmentation's randomness.
    # if (random.random() > p):
    #     return image, quads, classes
    height, width, channels = image.shape
    overlap_mode = [0.7, None]
    # overlap_mode = random.choice(overlap_modes)
    bboxes = get_bboxes_from_quads(quads)
    min_iou, max_iou = overlap_mode
    if min_iou is None:
        min_iou = float(-np.inf)
    if max_iou is None:
        max_iou = float(np.inf)
    temp_image = image.copy()
    for _ in range(max_attempts):
        crop_w = random.uniform(min_size * width, max_size * width)
        crop_h = random.uniform(min_size * height, max_size * height)
        crop_ar = crop_h / crop_w
        if crop_ar < min_ar or crop_ar > max_ar:  # crop ar does not match criteria, next attempt
            continue
        crop_left = random.uniform(0, width - crop_w)
        crop_top = random.uniform(0, height - crop_h)
        # FIX: np.float was removed in NumPy 1.24; use np.float64 explicitly.
        crop_rect = np.array([crop_left, crop_top, crop_left + crop_w, crop_top + crop_h], dtype=np.float64)
        crop_rect = np.tile(np.expand_dims(crop_rect, axis=0), (bboxes.shape[0], 1))
        ious = iou(crop_rect, bboxes)
        obj_coverage = object_coverage(crop_rect, bboxes)
        if (ious.min() < min_iou and ious.max() > max_iou) or (obj_coverage.min() < min_iou and obj_coverage.max() > max_iou):
            continue
        # Keep only the boxes whose centers fall strictly inside the crop.
        bbox_centers = np.zeros((bboxes.shape[0], 2), dtype=np.float64)
        bbox_centers[:, 0] = (bboxes[:, 0] + bboxes[:, 2]) / 2
        bbox_centers[:, 1] = (bboxes[:, 1] + bboxes[:, 3]) / 2
        cx_in_crop = (bbox_centers[:, 0] > crop_left) * (bbox_centers[:, 0] < crop_left + crop_w)
        cy_in_crop = (bbox_centers[:, 1] > crop_top) * (bbox_centers[:, 1] < crop_top + crop_h)
        boxes_in_crop = cx_in_crop * cy_in_crop
        if not boxes_in_crop.any():
            continue
        temp_image = temp_image[int(crop_top): int(crop_top + crop_h), int(crop_left): int(crop_left + crop_w), :]
        # FIX: np.object was removed in NumPy 1.24; use the builtin `object`.
        temp_classes = np.array(classes, dtype=object)[boxes_in_crop]
        temp_bboxes = bboxes[boxes_in_crop]
        temp_quads = quads[boxes_in_crop]
        crop_rect = np.array([crop_left, crop_top, crop_left + crop_w, crop_top + crop_h], dtype=np.float64)
        crop_rect = np.tile(np.expand_dims(crop_rect, axis=0), (temp_bboxes.shape[0], 1))
        temp_bboxes[:, :2] = np.maximum(temp_bboxes[:, :2], crop_rect[:, :2])  # if bboxes top left is out of crop then use crop's xmin, ymin
        temp_bboxes[:, :2] -= crop_rect[:, :2]  # translate xmin, ymin to fit crop
        temp_bboxes[:, 2:] = np.minimum(temp_bboxes[:, 2:], crop_rect[:, 2:])
        temp_bboxes[:, 2:] -= crop_rect[:, :2]  # translate xmax, ymax to fit crop
        # NOTE(review): the returned quads are NOT translated/clipped to crop
        # coordinates (only the internal bboxes are) -- confirm whether
        # callers expect quads in original-image coordinates.
        return temp_image, temp_quads, temp_classes.tolist()
    # FIX: on failure the original returned the derived axis-aligned `bboxes`
    # instead of the caller's `quads`, violating the documented contract.
    return image, quads, classes
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torchvision
from . import resnet as resnet
from . import resnext as resnext
from torch.nn.init import kaiming_normal_,constant_,normal_
from core.config import cfg
import torch.nn.functional as F
import modeling.CRL as CRL
import modeling.cspn as cspn
import time
timer=time.time
if not cfg.SEM.BN_LEARN:
    from lib.nn import SynchronizedBatchNorm2d
else:
    # BUG FIX: `import torch.nn.BatchNorm2d as ...` raises ModuleNotFoundError
    # because BatchNorm2d is a class, not a module; bind it with `from ... import`.
    from torch.nn import BatchNorm2d as SynchronizedBatchNorm2d
def correlate(input1, input2):
    """Patch-wise spatial correlation of two feature maps, flattened to 4D
    and passed through an in-place leaky ReLU.

    NOTE(review): `spatial_correlation_sample` is neither defined nor
    imported in this module (it normally comes from the third-party
    `spatial_correlation_sampler` package), so calling this function as-is
    raises NameError -- confirm the missing import.
    """
    out_corr = spatial_correlation_sample(input1,
                                          input2,
                                          kernel_size=1,
                                          patch_size=21,
                                          stride=1,
                                          padding=0,
                                          dilation_patch=2)
    # collate dimensions 1 and 2 in order to be treated as a
    # regular 4D tensor
    b, ph, pw, h, w = out_corr.size()
    # Normalize by the number of input channels.
    out_corr = out_corr.view(b, ph * pw, h, w) / input1.size(1)
    return F.leaky_relu_(out_corr, 0.1)
class CorrelationLayer1D(nn.Module):
    """Horizontal (1-D) correlation layer for stereo cost volumes.

    The right feature map is zero-padded asymmetrically (20% of max_disp on
    the left, 80% on the right); for each horizontal shift the channel-wise
    dot product with the left features is taken, producing
    max_disp // stride_2 + 1 correlation channels.
    """

    def __init__(self, max_disp=40, stride_2=1):
        super(CorrelationLayer1D, self).__init__()
        self.max_displacement = max_disp
        self.stride_2 = stride_2

    def forward(self, x_1, x_2):
        pad_left = int(self.max_displacement * 0.2)
        pad_right = int(self.max_displacement * 0.8)
        padded = F.pad(x_2, (pad_left, pad_right, 0, 0))
        width = x_1.size(3)
        slices = []
        for shift in range(0, self.max_displacement + 1, self.stride_2):
            window = padded[:, :, :, shift:shift + width]
            slices.append(torch.sum(x_1 * window, 1).unsqueeze(1))
        return torch.cat(slices, 1)
class CorrelationLayer1DMinus(nn.Module):
    """Difference-based 1-D correlation: for each horizontal shift, computes
    sigmoid(1 / (channel-sum of (left - shifted right) + eps))."""

    def __init__(self, max_disp=40, stride_2=1):
        super(CorrelationLayer1DMinus, self).__init__()
        self.max_displacement = max_disp
        self.stride_2 = stride_2

    def forward(self, x_1, x_2):
        eps = 0.000001
        pad_left = int(self.max_displacement * 0.2)
        pad_right = int(self.max_displacement * 0.8)
        padded = F.pad(x_2, (pad_left, pad_right, 0, 0))
        width = x_1.size(3)
        diffs = [torch.sum(x_1 - padded[:, :, :, shift:shift + width], 1).unsqueeze(1)
                 for shift in range(0, self.max_displacement + 1, self.stride_2)]
        minus = torch.cat(diffs, 1)
        inverse = 1 / (minus + eps)
        return torch.sigmoid_(inverse)
def costVolume(leftFeature, rightFeature, max_displacement):
    """Build a concatenation-style stereo cost volume.

    Returns a zero-initialized tensor of shape (B, 2C, D, H, W) where, at
    disparity d > 0, columns d: of the first C channels hold left[..., d:]
    and the last C channels hold right[..., :-d]; disparity 0 copies both
    maps unshifted.
    """
    batch, channels, height, width = leftFeature.size()
    cost = torch.zeros(batch, channels * 2, max_displacement, height, width)
    for d in range(max_displacement):
        if d == 0:
            cost[:, :channels, d, :, :] = leftFeature
            cost[:, channels:, d, :, :] = rightFeature
        else:
            cost[:, :channels, d, :, d:] = leftFeature[:, :, :, d:]
            cost[:, channels:, d, :, d:] = rightFeature[:, :, :, :-d]
    return cost.contiguous()
class CorrelationLayerCosineSimilarity(nn.Module):
    """Cosine-similarity 1-D correlation over horizontal shifts.

    Args:
        max_disp: number of horizontal shifts (yields max_disp + 1 output
            channels at the default stride).
        stride_2: step between successive shifts.
        dim: dimension over which cosine similarity is computed.
        eps: numerical floor for the similarity denominator.
    """

    def __init__(self, max_disp=40, stride_2=1, dim=1, eps=1e-6):
        super(CorrelationLayerCosineSimilarity, self).__init__()
        self.max_displacement = max_disp
        self.stride_2 = stride_2
        # BUG FIX: `dim` and `eps` were accepted but ignored (hard-coded to
        # 1 and 1e-6); wire them through. Defaults keep behavior identical.
        self.cos = torch.nn.CosineSimilarity(dim=dim, eps=eps)

    def forward(self, x_1, x_2):
        # Unlike CorrelationLayer1D's 20%/80% split, the right map is padded
        # entirely on the right here.
        x_2 = F.pad(x_2, (int(self.max_displacement * 0), int(self.max_displacement * 1), 0, 0))
        similarity = torch.cat([self.cos(x_1, x_2[:, :, :, _y:_y + x_1.size(3)]).unsqueeze(1)
                                for _y in range(0, self.max_displacement + 1, self.stride_2)], 1)
        return similarity
def costVolume2(leftFeature, rightFeature, max_displacement):
    """Build a stereo cost volume with *interleaved* left/right channels.

    Unlike ``costVolume`` (all left channels first, then all right channels),
    here even output channels hold shifted left features and the following
    odd channel holds the matching right features.
    Output shape: (B, 2C, max_displacement, H, W), allocated on the GPU.

    NOTE(review): requires CUDA (`.cuda()`), and the per-batch/per-channel
    Python loops make this much slower than the vectorized ``costVolume``.
    """
    cost = torch.zeros(leftFeature.size()[0], leftFeature.size()[1] * 2, max_displacement, leftFeature.size()[2], leftFeature.size()[3]).cuda()
    for b in range(cost.size()[0]):
        i = 0
        # Walk output channels two at a time: (left, right) pairs.
        while i < cost.size()[1]:
            for j in range(max_displacement):
                if j > 0:
                    cost[b, i, j, :, j:] = leftFeature[b, i // 2, :, j:]
                    cost[b, i + 1, j, :, j:] = rightFeature[b, i // 2, :, :-j]
                else:
                    cost[b, i, j, :, :] = leftFeature[b, i // 2, ...]
                    cost[b, i + 1, j, :, :] = rightFeature[b, i // 2, ...]
            i += 2
    return cost
class SegmentationModuleBase(nn.Module):
    """Shared utilities for segmentation modules."""

    def __init__(self):
        super(SegmentationModuleBase, self).__init__()

    def pixel_acc(self, pred, label):
        """Pixel accuracy over positions with non-negative labels.

        Args:
            pred: (B, C, ...) raw class scores.
            label: (B, ...) integer targets; negative entries are ignored.
        Returns:
            Scalar tensor accuracy in [0, 1].
        """
        predicted_classes = torch.max(pred, dim=1)[1]
        valid_mask = (label >= 0).long()
        correct = torch.sum(valid_mask * (predicted_classes == label).long())
        total = torch.sum(valid_mask)
        # Small epsilon guards against division by zero when nothing is valid.
        return correct.float() / (total.float() + 1e-10)
class SegmentationModule(SegmentationModuleBase):
    # Encoder/decoder wrapper: returns (loss, accuracy) in training mode
    # (segSize is None) and upsampled class probabilities at inference.
    def __init__(self, net_enc, net_dec, crit, deep_sup_scale=None):
        super(SegmentationModule, self).__init__()
        self.encoder = net_enc  # backbone producing feature maps
        self.decoder = net_dec  # head turning features into class scores
        self.crit = crit  # loss criterion applied to decoder output
        self.deep_sup_scale = deep_sup_scale  # per-level weights for deep supervision; None disables it

    def forward(self, feed_dict, *, segSize=None):
        if segSize is None:  # training
            if self.deep_sup_scale is not None:  # use deep supervision technique
                (pred, pred_deepsup) = self.decoder(self.encoder(feed_dict['data'], return_feature_maps=True))
            else:
                pred = self.decoder(self.encoder(feed_dict['data'], return_feature_maps=True))
            # Main loss against the target stored under the "<prefix>_0" key.
            loss = self.crit(pred, feed_dict[cfg.SEM.OUTPUT_PRIFEX + '_0'])
            if self.deep_sup_scale is not None:
                # Weighted auxiliary losses against the remaining downsampled targets.
                for i in range(2, len(cfg.SEM.DOWNSAMPLE)):
                    loss_deepsup = self.crit(pred_deepsup,
                                             feed_dict['{}_{}'.format(cfg.SEM.OUTPUT_PRIFEX, i)])
                    loss = loss + loss_deepsup * self.deep_sup_scale[i]
            acc = self.pixel_acc(pred, feed_dict[cfg.SEM.OUTPUT_PRIFEX + '_0'])
            return loss, acc
        else:  # inference
            pred = self.decoder(self.encoder(feed_dict['data'], return_feature_maps=True), segSize=segSize)
            return pred
def conv3x3(in_planes, out_planes, stride=1, has_bias=False):
    """3x3 convolution with 1-pixel padding (spatial size preserved at stride 1)."""
    return nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride,
                     padding=1, bias=has_bias)
def conv3x3_bn_relu(in_planes, out_planes, stride=1):
    """conv3x3 -> SynchronizedBatchNorm2d -> ReLU building block."""
    layers = [
        conv3x3(in_planes, out_planes, stride),
        SynchronizedBatchNorm2d(out_planes),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)
class ModelBuilder():
    """Factory for segmentation encoders/decoders with optional checkpoint loading."""

    # custom weights initialization
    def weights_init(self, m):
        """Kaiming-init conv weights; set BN weight to 1 and a tiny bias."""
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            nn.init.kaiming_normal_(m.weight.data)
        elif classname.find('BatchNorm') != -1:
            m.weight.data.fill_(1.)
            m.bias.data.fill_(1e-4)
        #elif classname.find('Linear') != -1:
        #    m.weight.data.normal_(0.0, 0.0001)

    def build_encoder(self, arch='resnet50_dilated8', fc_dim=512, weights=''):
        """Instantiate the encoder backbone named by `arch`; load `weights` if given.

        Raises:
            NotImplementedError: for the resnet34 variants.
            Exception: for an unknown architecture name.
        """
        # Use ImageNet-pretrained weights only when no checkpoint path is given.
        # FIX: replaced the redundant `True if len(weights) == 0 else False`.
        pretrained = len(weights) == 0
        if arch == 'resnet18':
            orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif arch == 'resnet18_dilated8':
            orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=8)
        elif arch == 'resnet18_dilated16':
            orig_resnet = resnet.__dict__['resnet18'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=16)
        elif arch in ('resnet34', 'resnet34_dilated8', 'resnet34_dilated16'):
            # FIX: the original constructed the network after `raise`, which
            # was unreachable dead code; the three branches collapse to one.
            raise NotImplementedError
        elif arch == 'resnet50':
            orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif arch == 'resnet50_dilated8':
            orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=8)
        elif arch == 'resnet50_dilated8_3DConv':
            orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
            net_encoder = ResnetDilated3DConv(orig_resnet,
                                              dilate_scale=8)
        elif arch == 'resnet50_dilated16':
            orig_resnet = resnet.__dict__['resnet50'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=16)
        elif arch == 'resnet101':
            orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnet)
        elif arch == 'resnet101_dilated8':
            orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=8)
        elif arch == 'resnet101_dilated16':
            orig_resnet = resnet.__dict__['resnet101'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=16)
        elif arch == 'resnext101':
            orig_resnext = resnext.__dict__['resnext101'](pretrained=pretrained)
            net_encoder = Resnet(orig_resnext)  # we can still use class Resnet
        elif arch == 'resnext101_dilated8':
            orig_resnet = resnext.__dict__['resnext101'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=8)
        elif arch == 'resnext101_dilated8_64':
            orig_resnet = resnext.__dict__['resnext101_64'](pretrained=pretrained)
            net_encoder = ResnetDilated(orig_resnet,
                                        dilate_scale=8)
        else:
            raise Exception('Architecture undefined!')
        # net_encoder.apply(self.weights_init)
        if len(weights) > 0:
            print('Loading weights for net_encoder')
            net_encoder.load_state_dict(
                torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
        return net_encoder

    def build_decoder(self, arch='ppm_bilinear_deepsup',
                      fc_dim=512, num_class=150,
                      weights='', use_softmax=False):
        """Instantiate the decoder head named by `arch`; load `weights` if given.

        Raises:
            Exception: for an unknown architecture name.
        """
        if arch == 'c1_bilinear_deepsup':
            net_decoder = C1BilinearDeepSup(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'c1_bilinear':
            net_decoder = C1Bilinear(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'ppm_bilinear':
            net_decoder = PPMBilinear(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'ppm_bilinear_deepsup':
            net_decoder = PPMBilinearDeepsup(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'ppm_bilinear3D':
            net_decoder = PPMBilinear3D(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax)
        elif arch == 'upernet_lite':
            net_decoder = UPerNet(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax,
                fpn_dim=256)
        elif arch == 'upernet':
            net_decoder = UPerNet(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax,
                fpn_dim=512)
        elif arch == 'upernet_tmp':
            net_decoder = UPerNetTmp(
                num_class=num_class,
                fc_dim=fc_dim,
                use_softmax=use_softmax,
                fpn_dim=512)
        else:
            raise Exception('Architecture undefined!')
        # Decoders always start from the custom initialization.
        net_decoder.apply(self.weights_init)
        if len(weights) > 0:
            print('Loading weights for net_decoder')
            net_decoder.load_state_dict(
                torch.load(weights, map_location=lambda storage, loc: storage), strict=False)
        return net_decoder
class Resnet(nn.Module):
    """ResNet trunk with a stereo correlation block injected after layer2.

    The input batch stacks left and right images along the batch dimension
    (left first; split size cfg.TRAIN.IMS_PER_BATCH). After layer2 the two
    halves are correlated, the correlation is fused with a 1x1-reduced left
    feature map, and both the left features and the fused branch continue
    through layer3/layer4.
    """

    def __init__(self, orig_resnet):
        super(Resnet, self).__init__()
        # take pretrained resnet, except AvgPool and FC
        self.conv1 = orig_resnet.conv1
        self.bn1 = orig_resnet.bn1
        self.relu1 = orig_resnet.relu1
        self.conv2 = orig_resnet.conv2
        self.bn2 = orig_resnet.bn2
        self.relu2 = orig_resnet.relu2
        self.conv3 = orig_resnet.conv3
        self.bn3 = orig_resnet.bn3
        self.relu3 = orig_resnet.relu3
        self.maxpool = orig_resnet.maxpool
        self.layer1 = orig_resnet.layer1
        self.layer2 = orig_resnet.layer2
        self.layer3 = orig_resnet.layer3
        self.layer4 = orig_resnet.layer4
        self.correlation = CorrelationLayer1D(max_disp=40, stride_2=1)
        self.conv_rdi = nn.Sequential(nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0),
                                      nn.ReLU(inplace=True))
        # NOTE(review): conv_r expects 357 input channels, but
        # conv_rdi (256) + correlation (41 for max_disp=40) = 297 --
        # confirm the intended max_disp/channel budget.
        self.conv_r = nn.Conv2d(357, 512, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn4 = SynchronizedBatchNorm2d(512)

    # BUG FIX: the class previously defined `forward` twice; the first
    # definition was dead code silently shadowed by the second. Only the
    # effective (second) definition is kept.
    def forward(self, x, return_feature_maps=False):
        """Run stem + layer1..layer4, injecting stereo correlation after layer2."""
        conv_out = []
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        x = self.layer1(x); conv_out.append(x)  # 256 channels
        x = self.layer2(x); conv_out.append(x)  # 512 channels
        # Split the stacked batch into left/right halves and correlate them.
        left, right = torch.split(x, cfg.TRAIN.IMS_PER_BATCH, dim=0)
        corr = self.correlation(left, right)
        conv_rdi = self.conv_rdi(left)
        x = torch.cat((conv_rdi, corr), dim=1)
        x = self.relu2(self.bn4(self.conv_r(x)))
        # Re-stack: the untouched left features continue alongside the fused branch.
        x = torch.cat((left, x), dim=0)
        x = self.layer3(x); conv_out.append(x)  # 1024 channels
        x = self.layer4(x); conv_out.append(x)  # 2048 channels
        if return_feature_maps:
            return conv_out
        return [x]
class ResnetDilated3DConv(nn.Module):
    """ResNet trunk converted to a dilated backbone (output stride 8 or 16),
    collecting intermediate feature maps for 3D-conv cost-volume heads."""

    def __init__(self, orig_resnet, dilate_scale=8, max_displacement=40):
        super(ResnetDilated3DConv, self).__init__()
        from functools import partial
        self.max_displacement = max_displacement
        # Replace stride-2 convs in the deeper stages by dilated convs.
        if dilate_scale == 8:
            orig_resnet.layer3.apply(partial(self._nostride_dilate, dilate=2))
            orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=4))
        elif dilate_scale == 16:
            orig_resnet.layer4.apply(partial(self._nostride_dilate, dilate=2))
        # take pretrained resnet, except AvgPool and FC
        self.conv1 = orig_resnet.conv1
        self.bn1 = orig_resnet.bn1
        self.relu1 = orig_resnet.relu1
        self.conv2 = orig_resnet.conv2
        self.bn2 = orig_resnet.bn2
        self.relu2 = orig_resnet.relu2
        self.conv3 = orig_resnet.conv3
        self.bn3 = orig_resnet.bn3
        self.relu3 = orig_resnet.relu3
        self.maxpool = orig_resnet.maxpool
        self.layer1 = orig_resnet.layer1
        self.layer2 = orig_resnet.layer2
        self.layer3 = orig_resnet.layer3
        self.layer4 = orig_resnet.layer4
        if cfg.SEM.LAYER_FIXED:
            # Freeze the stem and the first two stages.
            for frozen in (self.conv1, self.conv2, self.conv3, self.layer1, self.layer2):
                for param in frozen.parameters():
                    param.requires_grad = False

    def _nostride_dilate(self, m, dilate):
        if m.__class__.__name__.find('Conv') == -1:
            return
        if m.stride == (2, 2):
            # the convolution with stride: drop the stride, halve dilation/padding
            m.stride = (1, 1)
            if m.kernel_size == (3, 3):
                m.dilation = (dilate // 2, dilate // 2)
                m.padding = (dilate // 2, dilate // 2)
        elif m.kernel_size == (3, 3):
            # other convolutions get the full dilation
            m.dilation = (dilate, dilate)
            m.padding = (dilate, dilate)

    def forward(self, x, return_feature_maps=False):
        feats = []
        x = self.relu1(self.bn1(self.conv1(x)))
        feats.append(x)
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)
            feats.append(x)
        if return_feature_maps:
            return feats
        return [x]
class ResnetDilated(nn.Module):
    # Dilated ResNet trunk (output stride 8 or 16) with a stereo correlation
    # block injected after layer2, mirroring the Resnet class above.
    def __init__(self, orig_resnet, dilate_scale=8):
        super(ResnetDilated, self).__init__()
        from functools import partial
        # Replace stride-2 convs of the deeper stages by dilated convs.
        if dilate_scale == 8:
            orig_resnet.layer3.apply(
                partial(self._nostride_dilate, dilate=2))
            orig_resnet.layer4.apply(
                partial(self._nostride_dilate, dilate=4))
        elif dilate_scale == 16:
            orig_resnet.layer4.apply(
                partial(self._nostride_dilate, dilate=2))
        # take pretrained resnet, except AvgPool and FC
        self.conv1 = orig_resnet.conv1
        self.bn1 = orig_resnet.bn1
        self.relu1 = orig_resnet.relu1
        self.conv2 = orig_resnet.conv2
        self.bn2 = orig_resnet.bn2
        self.relu2 = orig_resnet.relu2
        self.conv3 = orig_resnet.conv3
        self.bn3 = orig_resnet.bn3
        self.relu3 = orig_resnet.relu3
        self.maxpool = orig_resnet.maxpool
        self.layer1 = orig_resnet.layer1
        self.layer2 = orig_resnet.layer2
        self.layer3 = orig_resnet.layer3
        self.layer4 = orig_resnet.layer4
        # Choose the cost-volume flavor from the config.
        # NOTE(review): if cfg.DISP.COST_VOLUME_TYPE matches none of the three
        # names, self.correlation is never set and forward() raises
        # AttributeError -- confirm the config value is validated upstream.
        if cfg.DISP.COST_VOLUME_TYPE == 'CorrelationLayer1D':
            self.correlation = CorrelationLayer1D(max_disp=40, stride_2=1)
        if cfg.DISP.COST_VOLUME_TYPE == 'CorrelationLayer1DMinus':
            self.correlation = CorrelationLayer1DMinus(max_disp=40, stride_2=1)
        if cfg.DISP.COST_VOLUME_TYPE == 'CorrelationLayerCosineSimilarity':
            self.correlation = CorrelationLayerCosineSimilarity(max_disp=40)
        self.bn4 = SynchronizedBatchNorm2d(512)
        self.conv_rdi = nn.Sequential(nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0),
                                      nn.ReLU(inplace=True))
        # 297 = 256 (conv_rdi output) + 41 (correlation channels for max_disp=40).
        self.conv_r = nn.Conv2d(297, 512, kernel_size=3, stride=1, padding=1, bias=False)
        # conv1/conv2 are frozen unconditionally ...
        for param in self.conv1.parameters():
            param.requires_grad = False
        for param in self.conv2.parameters():
            param.requires_grad = False
        if cfg.SEM.LAYER_FIXED:
            # ... and with LAYER_FIXED the remaining stem + layer1/layer2 too
            # (conv1/conv2 are redundantly re-frozen here).
            for param in self.conv1.parameters():
                param.requires_grad = False
            for param in self.conv2.parameters():
                param.requires_grad = False
            for param in self.conv3.parameters():
                param.requires_grad = False
            for param in self.layer1.parameters():
                param.requires_grad = False
            for param in self.layer2.parameters():
                param.requires_grad = False

    def _nostride_dilate(self, m, dilate):
        # Applied via Module.apply: convert stride-2 convs to stride 1 and
        # dilate 3x3 kernels so the receptive field is preserved.
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            # the convolution with stride
            if m.stride == (2, 2):
                m.stride = (1, 1)
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate // 2, dilate // 2)
                    m.padding = (dilate // 2, dilate // 2)
            # other convoluions
            else:
                if m.kernel_size == (3, 3):
                    m.dilation = (dilate, dilate)
                    m.padding = (dilate, dilate)

    def forward(self, x, return_feature_maps=False):
        conv_out = []
        x = self.relu1(self.bn1(self.conv1(x)))
        conv_out.append(x)
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        x = self.layer1(x); conv_out.append(x)
        x = self.layer2(x); conv_out.append(x)
        # Split the stacked batch into left/right halves and correlate them.
        left, right = torch.split(x, cfg.TRAIN.IMS_PER_BATCH, dim=0)
        corr = self.correlation(left, right)
        conv_rdi = self.conv_rdi(left)
        x = torch.cat((conv_rdi, corr), dim=1)
        x = self.relu2(self.bn4(self.conv_r(x)))
        # Re-stack: left features continue alongside the fused branch.
        x = torch.cat((left, x), dim=0)
        x = self.layer3(x); conv_out.append(x)
        x = self.layer4(x); conv_out.append(x)
        if return_feature_maps:
            return conv_out
        return [x]
# last conv, bilinear upsample
class C1BilinearDeepSup(nn.Module):
    """Single 3x3-conv head with bilinear upsampling and deep supervision."""

    def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
        super(C1BilinearDeepSup, self).__init__()
        self.use_softmax = use_softmax
        self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
        self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
        # last conv
        self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)

    def forward(self, conv_out, segSize=None):
        x = self.conv_last(self.cbr(conv_out[-1]))
        if self.use_softmax:  # is True during inference
            x = nn.functional.interpolate(
                x, size=segSize, mode='bilinear', align_corners=False)
            return nn.functional.softmax(x, dim=1)
        # training: deep-supervision branch on the penultimate feature map
        deepsup = self.conv_last_deepsup(self.cbr_deepsup(conv_out[-2]))
        x = nn.functional.log_softmax(x, dim=1)
        deepsup = nn.functional.log_softmax(deepsup, dim=1)
        return (x, deepsup)
# last conv, bilinear upsample
class C1Bilinear(nn.Module):
    """Single conv head: log-softmax during training, upsampled softmax at inference."""

    def __init__(self, num_class=150, fc_dim=2048, use_softmax=False):
        super(C1Bilinear, self).__init__()
        self.use_softmax = use_softmax
        self.cbr = conv3x3_bn_relu(fc_dim, fc_dim // 4, 1)
        # last conv
        self.conv_last = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)

    def forward(self, conv_out, segSize=None):
        scores = self.conv_last(self.cbr(conv_out[-1]))
        if self.use_softmax:  # is True during inference
            scores = nn.functional.interpolate(
                scores, size=segSize, mode='bilinear', align_corners=False)
            return nn.functional.softmax(scores, dim=1)
        return nn.functional.log_softmax(scores, dim=1)
# pyramid pooling, bilinear upsample
class PPMBilinear(nn.Module):
    """Pyramid pooling head (PSPNet-style) producing a fused 512-channel map.

    NOTE(review): `num_class` and `use_softmax` are accepted for interface
    parity with the other decoders but unused here -- this head stops at the
    fused 512-channel features; confirm a classifier is applied downstream.
    """

    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super(PPMBilinear, self).__init__()
        self.use_softmax = use_softmax
        branches = []
        for scale in pool_scales:
            branches.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(branches)
        self.conv_last = nn.Sequential(
            nn.Conv2d(fc_dim + len(pool_scales) * 512, 512,
                      kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1)
        )

    def forward(self, conv_out, segSize=None):
        # With USE_RESNET the tensor is fed directly; otherwise take the last map.
        conv5 = conv_out if cfg.SEM.USE_RESNET else conv_out[-1]
        h, w = conv5.size(2), conv5.size(3)
        pooled = [conv5]
        for branch in self.ppm:
            pooled.append(nn.functional.interpolate(
                branch(conv5), (h, w),
                mode='bilinear', align_corners=False))
        return self.conv_last(torch.cat(pooled, 1))
# pyramid pooling, bilinear upsample
class PPMBilinearDeepsup(nn.Module):
    """Pyramid Pooling head over stacked semseg + disparity features.

    The incoming batch is assumed to stack the semantic and disparity
    streams along dim 0; forward() splits the halves and re-joins them
    along the channel axis before pooling.  Despite the name, the deep-
    supervision classifier is commented out: forward() returns the fused
    PPM features together with the raw `conv_out[-2]` stage.
    """
    def __init__(self, num_class=150, fc_dim=1024,
                 use_softmax=False, pool_scales=(1, 2, 3, 6)):
        super(PPMBilinearDeepsup, self).__init__()
        self.use_softmax = use_softmax
        self.ppm = []
        # one adaptive-pool + 1x1-conv branch per pyramid scale (no BN here)
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                #SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)
        #self.reduce=nn.Conv2d(fc_dim*2,fc_dim,kernel_size=1,stride=1,padding=0,bias=False)
        #self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
        # fuse [input + pyramid] channels down to 512
        self.aspp_last = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1)
        )
        #self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        #self.dropout_deepsup = nn.Dropout2d(0.1)
    def forward(self, conv_out, segSize=None):
        # ResNet backbones hand over a bare tensor; others a list of stage maps
        if cfg.SEM.USE_RESNET:
            conv5=conv_out
        else:
            conv5 = conv_out[-1]
        #conv_out, 2, c, h, w, dim 0 is semseg and disp
        input_size = conv5.size()
        # split the stacked semseg/disp halves of the batch ...
        semseg_conv, disp_conv = torch.split(conv5, input_size[0]//2 ,dim=0)
        #conv5 is 1, 2*c, h, w
        # ... and stack them back along channels.  This doubles the channel
        # count, so fc_dim must already equal the doubled value — TODO confirm
        # against the caller.
        conv5 = torch.cat([semseg_conv, disp_conv], dim=1)
        #conv5=self.reduce(conv5)
        ppm_out = [conv5]
        for pool_scale in self.ppm:
            ppm_out.append(nn.functional.interpolate(
                pool_scale(conv5),
                (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False))
        ppm_out = torch.cat(ppm_out, 1)
        x = self.aspp_last(ppm_out)
        # deep sup
        # NOTE(review): when cfg.SEM.USE_RESNET is true, conv_out is a tensor
        # and conv_out[-2] indexes the batch dimension — verify this path.
        conv4 = conv_out[-2]
        #_ = self.cbr_deepsup(conv4)
        #_ = self.dropout_deepsup(_)
        #_ = self.conv_last_deepsup(_)
        #X = nn.functional.log_softmax(x, dim=1)
        #_ = nn.functional.log_softmax(_, dim=1)
        return [x, conv4]
class PPMBilinear3D(nn.Module):
    """PPM feature fusion followed by a 3D cost-volume hourglass for stereo.

    forward() fuses PPM context into the deepest 2D features, reduces them
    to `channelsReduction` channels, splits the batch into left/right
    halves, builds a cost volume and regularizes it with grouped 3D convs;
    a GCNASPP head produces the final output.
    """
    def __init__(self, num_class=150, fc_dim=2048,
                 use_softmax=False, pool_scales=(1, 2, 3, 6),channelsReduction=19):
        super(PPMBilinear3D, self).__init__()
        self.use_softmax = use_softmax
        self.channelsReduction=channelsReduction
        self.ppm = []
        # fixed working resolution assumed by the GCNASPP heads below
        self.width=96
        self.height=96
        self.semseg=cfg.MODEL.NUM_CLASSES
        self.max_displacement=cfg.DISP.FEATURE_MAX_DISPLACEMENT
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool2d(scale),
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                nn.BatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)
        #self.cbr_deepsup = conv3x3_bn_relu(fc_dim // 2, fc_dim // 4, 1)
        # fuse [input + pyramid] channels down to 512
        self.aspp_last = nn.Sequential(
            nn.Conv2d(fc_dim+len(pool_scales)*512, 512,
                      kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Dropout2d(0.1)
        )
        # 3D hourglass over the cost volume: stride-2 encoder stacks ...
        cost_channels = channelsReduction*2
        self.stack0 = self._createStack(cost_channels,cost_channels,stride1=1)
        self.stack1_1 = self._createStack(cost_channels,cost_channels*2)
        self.stack1_2 = self._createStack(cost_channels*2,cost_channels*4)
        self.stack1_3 = self._createStack(cost_channels*4,cost_channels*8)
        # ... and matching transposed-conv decoder
        self.stack2_1 = self._Deconv3D(cost_channels*8,cost_channels*4)
        self.stack2_2 = self._Deconv3D(cost_channels*4,cost_channels*2)
        self.stack2_3 = self._Deconv3D(cost_channels*2,cost_channels)
        # multi-scale GCN-ASPP heads (only gcn3 is used in forward)
        self.gcn1=GCNASPP(cost_channels*4,self.semseg,self.max_displacement//4,self.height//4,self.width//4,scale=2,pool_scales=(4,8,13,24))
        self.gcn2=GCNASPP(cost_channels*2,self.semseg,self.max_displacement//2,self.height//2,self.width//2,scale=1,pool_scales=(2,4,6,12))
        self.gcn3=GCNASPP(cost_channels,self.semseg,self.max_displacement,self.height,self.width,scale=0,pool_scales=(2,3,4,6))
        self.reduce = nn.Sequential(
            nn.Conv2d(512,self.channelsReduction,kernel_size=1,stride=1,bias=False),
            nn.BatchNorm2d(channelsReduction)
        )
        # Kaiming init for convs, constant init for norms
        for m in self.modules():
            if isinstance(m,nn.Conv2d) or isinstance(m,nn.Conv3d) or isinstance(m,nn.ConvTranspose3d):
                kaiming_normal_(m.weight,0.1)
                if m.bias is not None:
                    constant_(m.bias,0)
            elif isinstance(m,nn.BatchNorm2d) or isinstance(m,nn.BatchNorm3d):
                constant_(m.weight,1)
                constant_(m.bias,0)
        #self.conv_last_deepsup = nn.Conv2d(fc_dim // 4, num_class, 1, 1, 0)
        #self.dropout_deepsup = nn.Dropout2d(0.1)
    def _createStack(self,inplanes=512,planes=256,kernel_size=3,stride1=2,groups=19,stride2=1,bias=False,padding=1):
        # two grouped 3x3x3 convs; NOTE(review): `groups` defaults to 19 and
        # must divide both inplanes and planes — confirm for all call sites
        return nn.Sequential(
            nn.Conv3d(inplanes,planes,kernel_size=3,stride=stride1,groups=groups,padding=1,bias=False),
            nn.BatchNorm3d(planes),
            nn.Conv3d(planes,planes,kernel_size=3,stride=stride2,groups=groups,padding=1,bias=False),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True)
        )
    def _Deconv3D(self,inplanes,planes,kernel_size=3,stride=2,padding=1,out_padding=1,groups=19,bias=False):
        # stride-2 transposed conv that undoes one encoder downsampling step
        return nn.ConvTranspose3d(inplanes,planes,kernel_size,stride,padding,out_padding,groups=groups,bias=bias)
    def forward(self, conv_out, segSize=None):
        conv5 = conv_out[-1]
        input_size = conv5.size()
        # PPM: pool at each scale, project, upsample back, concat with conv5
        ppm_out = [conv5]
        for pool_scale in self.ppm:
            ppm_out.append(nn.functional.interpolate(
                pool_scale(conv5),
                (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False))
        ppm_out = torch.cat(ppm_out, 1)
        x = self.aspp_last(ppm_out)
        x = self.reduce(x)
        # batch stacks left and right views: split, then correlate
        left, right=torch.split(x, cfg.TRAIN.IMS_PER_BATCH, dim=0)
        cost = costVolume2(left,right,cfg.DISP.FEATURE_MAX_DISPLACEMENT)
        stack0=self.stack0(cost)
        stack1_1=self.stack1_1(stack0)
        stack1_2=self.stack1_2(stack1_1)
        stack1_3=self.stack1_3(stack1_2)
        stack2_1=self.stack2_1(stack1_3)
        stack2_2=self.stack2_2(stack2_1)
        stack2_3=self.stack2_3(stack2_2)
        # NOTE(review): both branches below are identical — the multi-scale
        # gcn1/gcn2 supervision is disabled (commented out)
        if self.training:
            #gcn1=self.gcn1(stack2_1)
            #gcn2=self.gcn2(stack2_2)
            gcn3=self.gcn3(stack2_3)
            return gcn3
        else:
            gcn3=self.gcn3(stack2_3)
            return gcn3
class GCNASPP(nn.Module):
    """3D "global convolution" head: three separable large-kernel 3D convs
    (along disparity, width and height) are summed, then fused with a 3D
    pyramid-pooling ASPP down to `semsegNums` (19) channels.

    NOTE(review): the constructor's d/h/w arguments are unused, and `scale`
    is immediately shadowed by the `for scale in pool_scales` loop.  The
    residual sum feeds the 19-channel ppm branches, so `planes` must be 19
    for `aspp_last`'s 5*19 input channels to line up — confirm call sites.
    """
    def __init__(self,inplanes,planes,d,h,w,scale,pool_scales=(2,4,8,16)):
        super(GCNASPP,self).__init__()
        self.inplanes=inplanes
        self.planes=planes
        self.semsegNums=19
        # separable 11-tap convs along each of the three volume axes
        self.disparity=self._Conv3d(self.inplanes,self.planes,kernel_size=(11,1,1),padding=(5,0,0))
        self.width=self._Conv3d(self.inplanes,self.planes,kernel_size=(1,1,11),padding=(0,0,5))
        self.height=self._Conv3d(self.inplanes,self.planes,kernel_size=(1,11,1),padding=(0,5,0))
        self.ppm = []
        for scale in pool_scales:
            self.ppm.append(nn.Sequential(
                nn.AdaptiveAvgPool3d(scale),
                nn.Conv3d(self.semsegNums,self.semsegNums,kernel_size=1,bias=False),
                nn.BatchNorm3d(self.semsegNums),
                nn.ReLU(inplace=True)
            ))
        self.ppm = nn.ModuleList(self.ppm)
        # fuse [summed convs + 4 pyramid branches] = 5 * 19 channels
        self.aspp_last = nn.Sequential(
            nn.Conv3d(5*self.semsegNums,self.semsegNums,kernel_size=3,padding=1,bias=False),
            nn.BatchNorm3d(self.semsegNums),
            nn.ReLU(inplace=True),
            nn.Dropout3d(0.1)
        )
        # Kaiming init for convs, constant init for norms
        for m in self.modules():
            if isinstance(m,nn.Conv2d) or isinstance(m,nn.Conv3d) or isinstance(m,nn.ConvTranspose3d):
                kaiming_normal_(m.weight,0.1)
                if m.bias is not None:
                    constant_(m.bias,0)
            elif isinstance(m,nn.BatchNorm2d) or isinstance(m,nn.BatchNorm3d):
                constant_(m.weight,1)
                constant_(m.bias,0)
    def _Conv3d(self,inplanes,planes,kernel_size,stride=1,groups=1,padding=1):
        """conv3d -> BN -> ReLU helper."""
        return nn.Sequential(
            nn.Conv3d(inplanes,planes,kernel_size,stride,padding=padding,bias=False),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True)
        )
    def forward(self,x):
        # sum of the three axis-aligned large-kernel responses
        disparity=self.disparity(x)
        width = self.width(x)
        height = self.height(x)
        out=disparity+width+height
        input_size = (out).size()
        # 3D pyramid pooling, upsampled back to the volume size
        ppm_out=[out]
        for pool_scale in self.ppm:
            ppm_out.append(nn.functional.interpolate(
                pool_scale(out),(input_size[2],input_size[3],input_size[4]),
                mode='trilinear',align_corners=False
            ))
        ppm_out=torch.cat(ppm_out,1)
        out = self.aspp_last(ppm_out)
        return out
# upernet
class UPerNet(nn.Module):
    """UPerNet decoder: PPM over the deepest feature map plus an FPN over
    the remaining backbone stages, fused and classified with a final conv.

    Fix: the training branch computed log_softmax but never returned it,
    so forward() fell through and returned None; it now returns the result.
    """
    def __init__(self, num_class=150, fc_dim=4096,
                 use_softmax=False, pool_scales=(1, 2, 3, 6),
                 fpn_inplanes=(256,512,1024,2048), fpn_dim=256):
        super(UPerNet, self).__init__()
        self.use_softmax = use_softmax
        # PPM Module: per-scale pooling + 1x1 projection
        self.ppm_pooling = []
        self.ppm_conv = []
        for scale in pool_scales:
            self.ppm_pooling.append(nn.AdaptiveAvgPool2d(scale))
            self.ppm_conv.append(nn.Sequential(
                nn.Conv2d(fc_dim, 512, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(512),
                nn.ReLU(inplace=True)
            ))
        self.ppm_pooling = nn.ModuleList(self.ppm_pooling)
        self.ppm_conv = nn.ModuleList(self.ppm_conv)
        self.ppm_last_conv = conv3x3_bn_relu(fc_dim + len(pool_scales)*512, fpn_dim, 1)
        # FPN Module: lateral 1x1 projections for every stage but the top
        self.fpn_in = []
        for fpn_inplane in fpn_inplanes[:-1]:  # skip the top layer
            self.fpn_in.append(nn.Sequential(
                nn.Conv2d(fpn_inplane, fpn_dim, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(fpn_dim),
                nn.ReLU(inplace=True)
            ))
        self.fpn_in = nn.ModuleList(self.fpn_in)
        self.fpn_out = []
        for i in range(len(fpn_inplanes) - 1):  # skip the top layer
            self.fpn_out.append(nn.Sequential(
                conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
            ))
        self.fpn_out = nn.ModuleList(self.fpn_out)
        # classify the concatenation of all pyramid levels
        self.conv_last = nn.Sequential(
            conv3x3_bn_relu(len(fpn_inplanes) * fpn_dim, fpn_dim, 1),
            nn.Conv2d(fpn_dim, num_class, kernel_size=1)
        )
    def forward(self, conv_out, segSize=None):
        conv5 = conv_out[-1]
        input_size = conv5.size()
        # PPM: pool, project, upsample back, concat with conv5
        ppm_out = [conv5]
        for pool_scale, pool_conv in zip(self.ppm_pooling, self.ppm_conv):
            ppm_out.append(pool_conv(nn.functional.interpolate(
                pool_scale(conv5),
                (input_size[2], input_size[3]),
                mode='bilinear', align_corners=False)))
        ppm_out = torch.cat(ppm_out, 1)
        f = self.ppm_last_conv(ppm_out)
        # FPN top-down pathway with lateral connections
        fpn_feature_list = [f]
        for i in reversed(range(len(conv_out) - 1)):
            conv_x = conv_out[i]
            conv_x = self.fpn_in[i](conv_x)  # lateral branch
            f = nn.functional.interpolate(
                f, size=conv_x.size()[2:], mode='bilinear', align_corners=False)  # top-down branch
            f = conv_x + f
            fpn_feature_list.append(self.fpn_out[i](f))
        fpn_feature_list.reverse()  # [P2 - P5]
        # fuse all pyramid levels at the finest resolution
        output_size = fpn_feature_list[0].size()[2:]
        fusion_list = [fpn_feature_list[0]]
        for i in range(1, len(fpn_feature_list)):
            fusion_list.append(nn.functional.interpolate(
                fpn_feature_list[i],
                output_size,
                mode='bilinear', align_corners=False))
        fusion_out = torch.cat(fusion_list, 1)
        x = self.conv_last(fusion_out)
        if self.use_softmax:  # is True during inference
            x = nn.functional.interpolate(
                x, size=segSize, mode='bilinear', align_corners=False)
            x = nn.functional.softmax(x, dim=1)
            return x
        x = nn.functional.log_softmax(x, dim=1)
        return x  # fix: was missing, forward() previously returned None here
class MiniPSMNet(nn.Module):
    """Lightweight PSMNet-style stereo head: reduce 2D features, build a
    cost volume, regularize it with a residual 3D hourglass, and regress a
    single-channel disparity prediction.

    Fixes: several conv layers spelled the ``stride`` keyword as ``strid``
    (a TypeError the moment the module was constructed), and the reduce
    branch normalized with ``BatchNorm2d(inplanes)`` although the
    preceding conv outputs ``planes`` channels.
    """
    def __init__(self):
        super(MiniPSMNet, self).__init__()
        self.channelsReduction = cfg.SEM.SD_DIM
        self.ppm = []
        # fixed working resolution assumed by the cost volume
        self.width = 96
        self.height = 96
        self.semseg = 19
        self.max_displacement = cfg.DISP.FEATURE_MAX_DISPLACEMENT
        cost_channels = self.channelsReduction * 2
        # 3D hourglass: stride-1 stem, stride-2 encoder stacks ...
        self.stack0 = self._createStack(cost_channels, cost_channels, stride1=1)
        self.stack1 = self._createStack(cost_channels, cost_channels, stride1=1)
        self.stack1_1 = self._createStack(cost_channels, cost_channels*2)
        self.stack1_2 = self._createStack(cost_channels*2, cost_channels*4)
        self.stack1_3 = self._createStack(cost_channels*4, cost_channels*8)
        # ... and matching transposed-conv decoder (skip-added in forward)
        self.stack2_1 = self._Deconv3D(cost_channels*8, cost_channels*4)
        self.stack2_2 = self._Deconv3D(cost_channels*4, cost_channels*2)
        self.stack2_3 = self._Deconv3D(cost_channels*2, cost_channels)
        # collapse the feature axis of the volume to one channel
        self.to2D = nn.Conv3d(cost_channels, 1, kernel_size=1, stride=1)
        self.reduce = self._reduce2D(512, self.channelsReduction)
        # NOTE(review): self.predict expects `cost_channels` input channels,
        # but forward() feeds it the squeezed volume whose channel count is
        # the disparity dimension — confirm they match in the deployed cfg.
        self.predict = self._predict(cost_channels)
        # Kaiming init for convs, constant init for norms
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d) or isinstance(m, nn.ConvTranspose3d):
                kaiming_normal_(m.weight, 0.1)
                if m.bias is not None:
                    constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d):
                constant_(m.weight, 1)
                constant_(m.bias, 0)
    def _createStack(self, inplanes=512, planes=256, kernel_size=3, stride1=2, stride2=1, groups=cfg.GROUP_NORM.NUM_GROUPS, bias=False, padding=1):
        """Two grouped 3x3x3 convs with BN+ReLU; stride1 controls downsampling."""
        return nn.Sequential(
            nn.Conv3d(inplanes, planes, kernel_size=3, stride=stride1, groups=groups, padding=1, bias=False),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True),
            nn.Conv3d(planes, planes, kernel_size=3, stride=stride2, groups=groups, padding=1, bias=False),
            nn.BatchNorm3d(planes),
            nn.ReLU(inplace=True)
        )
    def _Deconv3D(self, inplanes, planes, kernel_size=3, stride=2, padding=1, out_padding=1, groups=19, bias=False):
        """Stride-2 transposed conv undoing one encoder downsampling step."""
        return nn.ConvTranspose3d(inplanes, planes, kernel_size, stride, padding, out_padding, groups=cfg.GROUP_NORM.NUM_GROUPS, bias=bias)
    def _reduce2D(self, inplanes, planes):
        """Channel-reduction branch for the 2D backbone features.

        Fix: BatchNorm now normalizes `planes` channels (the conv output);
        the original passed `inplanes`, which crashes at runtime.
        """
        return nn.Sequential(
            nn.Conv2d(inplanes, planes, kernel_size=1, stride=1),
            nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(planes),
            nn.ReLU(inplace=True)
        )
    def _predict(self, inplanes):
        """Single-channel prediction conv over the squeezed cost volume."""
        return nn.Sequential(
            nn.Conv2d(inplanes, 1, kernel_size=1, stride=1),
            nn.ReLU(inplace=True)
        )
    def forward(self, conv_out):
        x = self.reduce(conv_out)
        # batch stacks left and right views: split, then correlate
        left, right = torch.split(x, cfg.TRAIN.IMS_PER_BATCH, dim=0)
        cost = costVolume2(left, right, self.max_displacement)
        stack0 = self.stack0(cost)
        stack1 = self.stack1(stack0)
        stack1_1 = self.stack1_1(stack1)
        stack1_2 = self.stack1_2(stack1_1)
        stack1_3 = self.stack1_3(stack1_2)
        # decoder with additive skip connections from the encoder
        stack2_1 = self.stack2_1(stack1_3) + stack1_2
        stack2_2 = self.stack2_2(stack2_1) + stack1_1
        stack2_3 = self.stack2_3(stack2_2) + stack1
        out2d = self.to2D(stack2_3)
        out = torch.squeeze(out2d, dim=1)
        predict = self.predict(out)
        return [out, predict]
class TConv(nn.Module):
    """Transposed convolution followed by a leaky ReLU (negative slope 0.1)."""

    def __init__(self, in_channels=256, out_channels=256, kernel_size=3, stride=1, padding=1):
        super(TConv, self).__init__()
        self.conv = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                                       padding=padding, bias=False)

    def forward(self, x):
        out = self.conv(x)
        return F.leaky_relu(out, negative_slope=0.1, inplace=True)
class FusionNet(nn.Module):
    """Coarse-to-fine disparity refinement: fuses semantic/disparity
    features with backbone features and predicts a single-channel map at
    1/8, 1/4, 1/2 and full feature resolution.

    Fixes (the original neither constructed nor ran):
    * ``strid`` -> ``stride`` keyword typos in several conv definitions;
    * ``self._TConv`` was called but the method was named ``_Tconv``;
    * ``self.rdi`` was built without the mandatory ``kernel_size`` arg;
    * ``forward`` referenced undefined names (``pred``, ``FeatureMap``,
      ``sefl``) instead of its own parameters;
    * ``torch.cat(a, b), dim=1`` call parentheses were misplaced.
    NOTE(review): several fusion input channel counts look inconsistent
    with the tensors actually concatenated — verify against the caller
    before relying on this module.
    """
    def __init__(self, inplanes):
        super(FusionNet, self).__init__()
        self.out_channels = 32
        # NOTE(review): `rdi` is never used in forward(); kernel_size=1 added
        # so the module at least constructs.
        self.rdi = nn.Conv2d(512 + cfg.SEM.SD_DIM * 2, self.out_channels * 8, kernel_size=1)
        # x2 upsampling stages, halving channels each time
        self.upconv8_4 = self._TConv(self.out_channels * 8, self.out_channels * 4)
        self.upconv4_2 = self._TConv(self.out_channels * 4, self.out_channels * 2)
        self.upconv2_1 = self._TConv(self.out_channels * 2, self.out_channels)
        # single-channel predictions at each scale
        self.pr8 = nn.Conv2d(self.out_channels * 8, 1, kernel_size=3, stride=1, padding=1, bias=False)  # 512
        self.pr4 = nn.Conv2d(self.out_channels * 4, 1, kernel_size=3, stride=1, padding=1, bias=False)  # 256
        self.pr2 = nn.Conv2d(self.out_channels * 2, 1, kernel_size=3, stride=1, padding=1, bias=False)  # 128
        self.pr1 = nn.Conv2d(self.out_channels, 1, kernel_size=3, stride=1, padding=1, bias=False)  # 64
        # per-scale fusion convs combining decoder and backbone features
        self.fusion8 = self._fusion(512 + 512 + cfg.SEM.SD_DIM * 2, self.out_channels * 8)
        self.fusion4 = self._fusion(self.out_channels * 4 + 256, self.out_channels * 4)
        self.fusion2 = self._fusion(self.out_channels * 2 + 128, self.out_channels * 2)
        self.fusion1 = self._fusion(self.out_channels * 1, self.out_channels)  # currently unused
    def _TConv(self, inplanes, planes):
        """x2 transposed conv followed by conv-BN-LeakyReLU."""
        return nn.Sequential(
            nn.ConvTranspose2d(inplanes, planes, kernel_size=3, stride=2, padding=1),
            nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(planes),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
    def _fusion(self, inplanes, planes, kernel_size=3, stride=1, padding=1):
        """Two convs + BN + LeakyReLU fusing concatenated features."""
        return nn.Sequential(
            nn.Conv2d(inplanes, planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=False),
            nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(planes),
            nn.LeakyReLU(negative_slope=0.1, inplace=True))
    def forward(self, semdisp, psm, resFeature):
        # the batch stacks both streams along dim 0: split them apart
        pred_semseg, pred_disp = torch.split(semdisp, cfg.TRAIN.IMS_PER_BATCH, dim=0)
        conv1a, _ = torch.split(resFeature[0], cfg.TRAIN.IMS_PER_BATCH, dim=0)  # 64 channels
        conv2a, _ = torch.split(resFeature[1], cfg.TRAIN.IMS_PER_BATCH, dim=0)  # 128 channels
        _, layer4 = torch.split(resFeature[4], cfg.TRAIN.IMS_PER_BATCH, dim=0)
        # coarsest scale: fuse disparity stream, PSM output and deep features
        feature8 = self.fusion8(torch.cat((pred_disp, psm, layer4), dim=1))
        pr8 = self.pr8(feature8)
        # NOTE(review): concatenating the 1-channel prediction changes the
        # channel count the upconv layers were sized for — confirm intent.
        upfeature8_4 = self.upconv8_4(torch.cat((pr8, feature8), dim=1))
        feature4 = self.fusion4(torch.cat((upfeature8_4, conv2a), dim=1))
        pr4 = self.pr4(feature4)
        upfeature4_2 = self.upconv4_2(torch.cat((pr4, feature4), dim=1))
        feature2 = self.fusion2(torch.cat((upfeature4_2, conv1a), dim=1))
        pr2 = self.pr2(feature2)
        upfeature2_1 = self.upconv2_1(torch.cat((pr2, feature2), dim=1))
        pr1 = self.pr1(upfeature2_1)
        return [pr1, pr2, pr4, pr8]
class MiniCSPN(nn.Module):
    """Joint semantic-segmentation / disparity decoder.

    The disparity side decodes a 1/16-resolution backbone feature map
    through four Gudi_UpProj_Block_Cat stages, merging SPP context at
    every scale; the semantic side runs an FPN over the backbone stages
    and classifies the fused pyramid, with a deep-supervision head.

    Fix: ``_up_pooling`` referenced an undefined name ``scale`` on its
    nearest-neighbour path (NameError whenever ``mode != 'bilinear'``);
    it now uses the ``scale_factor`` parameter.
    """
    def __init__(self, in_channels):
        super(MiniCSPN, self).__init__()
        self.in_channels = in_channels
        self.FupCat = []
        fpn_dim = cfg.SEM.DIM
        # project the 2048-channel 1/16 features down for the disp decoder
        self.predisp_16x = nn.Sequential(
            nn.Conv2d(2048, in_channels, kernel_size=3, padding=1, bias=False),
            SynchronizedBatchNorm2d(in_channels),
            nn.ReLU(inplace=True))
        # four x2 up-projection stages, halving channels each time
        for i in range(4):
            self.FupCat.append(
                Gudi_UpProj_Block_Cat(self.in_channels//2**i, self.in_channels//2**(i+1)))
        self.FupCat = nn.ModuleList(self.FupCat)
        # disp output side: per-scale adapters merging (resized) SPP context
        # into the decoder outputs and reducing them to a single channel
        self.merge_spp_list = []
        self.merge_spp_down = []
        for i in range(5):
            self.merge_spp_down.append(nn.Sequential(
                nn.Conv2d(512, self.in_channels//2**i, kernel_size=1, padding=0, bias=False),
                SynchronizedBatchNorm2d(self.in_channels//2**i),
                nn.ReLU(inplace=True)))
            self.merge_spp_list.append(nn.Sequential(
                conv3x3_bn_relu(2*self.in_channels//2**i, self.in_channels//2**i, 1),
                conv3x3_bn_relu(self.in_channels//2**i, 1, 1)
            ))
        self.merge_spp_list = nn.ModuleList(self.merge_spp_list)
        self.merge_spp_down = nn.ModuleList(self.merge_spp_down)
        self.disp_outside = []
        # FPN Module: lateral projections + per-level output convs
        self.fpn_in = []
        for i in range(len(cfg.SEM.FPN_DIMS)):  # skip the top layer
            self.fpn_in.append(nn.Sequential(
                nn.Conv2d(cfg.SEM.FPN_DIMS[i], fpn_dim, kernel_size=1, bias=False),
                SynchronizedBatchNorm2d(fpn_dim),
                nn.ReLU(inplace=True)
            ))
        self.fpn_in = nn.ModuleList(self.fpn_in)
        self.fpn_out = []
        for i in range(len(cfg.SEM.FPN_DIMS)):  # skip the top layer
            self.fpn_out.append(nn.Sequential(
                conv3x3_bn_relu(fpn_dim, fpn_dim, 1),
            ))
        self.fpn_out = nn.ModuleList(self.fpn_out)
        self.conv_last = nn.Sequential(
            conv3x3_bn_relu(len(cfg.SEM.FPN_DIMS) * fpn_dim + fpn_dim, fpn_dim, 1),
            nn.Conv2d(fpn_dim, cfg.MODEL.NUM_CLASSES, kernel_size=1)
        )
        # auxiliary (deep supervision) semantic head on the 1024-ch stage
        self.semseg_deepsup = nn.Sequential(
            conv3x3_bn_relu(1024, 512, 1),
            nn.Conv2d(512, 19, kernel_size=3, padding=1, bias=False))
        # Kaiming init for convs, constant init for norms
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_normal_(m.weight, 0.1)
                if m.bias is not None:
                    constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                constant_(m.weight, 1)
                constant_(m.bias, 0)
    def _conv(self, inplanes, planes, kernel_size=3, stride=1, padding=1, bias=False):
        """conv -> BN -> ReLU helper (currently unused)."""
        return nn.Sequential(
            nn.Conv2d(inplanes, planes, kernel_size, stride=stride, padding=padding, bias=bias),
            nn.BatchNorm2d(planes),
            nn.ReLU(inplace=True)
        )
    def _semOut(self, inplanes, kernel_size=3, stride=1, padding=1, bias=False):
        """19-way semantic output conv (currently unused)."""
        return nn.Sequential(
            nn.Conv2d(inplanes, 19, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias))
    def _out(self, inplanes, kernel_size=3, stride=1, padding=1, bias=False):
        """Two-conv single-channel output head (currently unused)."""
        return nn.Sequential(
            nn.Conv2d(inplanes, inplanes, kernel_size=kernel_size, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(inplanes),
            nn.ReLU(inplace=True),
            nn.Conv2d(inplanes, 1, kernel_size=kernel_size, stride=1, padding=1, bias=True))
    def _up_pooling(self, x, scale_factor, mode='bilinear', oheight=0, owidth=0):
        """Upsample by scale_factor: bilinear by default, otherwise a sparse
        nearest-neighbour scheme keeping one pixel per 2x2 cell."""
        if mode == 'bilinear':
            return nn.functional.interpolate(x, scale_factor=scale_factor, mode='bilinear')
        # fix: this path used an undefined name `scale` (NameError)
        x = nn.Upsample(scale_factor=scale_factor, mode='nearest')(x)
        if oheight != 0 and owidth != 0:
            x = x[:, :, 0:oheight, 0:owidth]
        mask = torch.zeros_like(x)
        for h in range(0, oheight, 2):
            for w in range(0, owidth, 2):
                mask[:, :, h, w] = 1
        x = torch.mul(mask, x)
        return x
    def forward(self, sspp, resFeature, left, right):
        # decode: split the stacked batch into semseg / disparity halves
        res16x_semseg, res16x_disp = torch.split(resFeature[-1], cfg.TRAIN.IMS_PER_BATCH, dim=0)
        # disp decoder
        self.disp_outside = []
        dispNx_in = self.predisp_16x(res16x_disp)
        self.disp_outside.append(dispNx_in)
        # decode through the four up-projection stages
        for i in range(4):
            dispNx_in = self.FupCat[i](dispNx_in, left, right, ratio=0)
            self.disp_outside.append(dispNx_in)
        # merge SPP context into every disparity scale (reduces to 1 channel)
        for i in range(5):
            sspp_i = self.merge_spp_down[i](sspp)
            sspp_i = F.interpolate(sspp_i, size=self.disp_outside[i].size()[2:], mode='bilinear', align_corners=False)
            self.disp_outside[i] = self.merge_spp_list[i](torch.cat([self.disp_outside[i], sspp_i], dim=1))
        # decode for semseg: FPN top-down pathway
        fpn_feature_list = [sspp]
        f = sspp
        for i in range(len(cfg.SEM.FPN_DIMS)):
            conv_x, _ = torch.split(resFeature[i+1], cfg.TRAIN.IMS_PER_BATCH, dim=0)
            conv_x = self.fpn_in[i](conv_x)
            f = F.interpolate(f, size=conv_x.size()[2:], mode='bilinear', align_corners=False)
            f = conv_x + f
            fpn_feature_list.append(self.fpn_out[i](f))
        fpn_feature_list.reverse()  # [P2 - P5]
        # fuse all pyramid levels at the finest resolution and classify
        output_size = fpn_feature_list[0].size()[2:]
        fusion_list = [fpn_feature_list[0]]
        for i in range(1, len(fpn_feature_list)):
            fusion_list.append(nn.functional.interpolate(
                fpn_feature_list[i],
                output_size,
                mode='bilinear', align_corners=False))
        fusion_out = torch.cat(fusion_list, 1)
        semseg_maps = self.conv_last(fusion_out)
        semseg_final = self._up_pooling(semseg_maps, scale_factor=4)
        res4_semseg, _ = torch.split(resFeature[-2], cfg.TRAIN.IMS_PER_BATCH, dim=0)
        semseg_res4 = self.semseg_deepsup(res4_semseg)
        return self.disp_outside, [semseg_res4, semseg_final]
class Gudi_UpProj_Block(nn.Module):
    """Residual up-projection block: sparse x2 up-pooling, then a two-conv
    main branch summed with a one-conv shortcut, ReLU at the end."""

    def __init__(self, in_channels, out_channels, oheight=0, owidth=0):
        super(Gudi_UpProj_Block, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=1, padding=2, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.sc_conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=1, padding=2, bias=False)
        self.sc_bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.oheight = oheight
        self.owidth = owidth
        # Kaiming init for convs, constant init for norms
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                kaiming_normal_(m.weight, 0.1)
                if m.bias is not None:
                    constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                constant_(m.weight, 1)
                constant_(m.bias, 0)

    def _up_pooling(self, x, scale):
        """Nearest upsample, optional crop, then keep only the top-left pixel
        of every 2x2 cell.

        NOTE(review): with oheight/owidth left at 0 the mask loops never run
        and the result is all zeros — confirm that is intended.
        """
        up = nn.Upsample(scale_factor=scale, mode='nearest')(x)
        if self.oheight != 0 and self.owidth != 0:
            up = up[:, :, 0:self.oheight, 0:self.owidth]
        mask = torch.zeros_like(up)
        for row in range(0, self.oheight, 2):
            for col in range(0, self.owidth, 2):
                mask[:, :, row, col] = 1
        return torch.mul(mask, up)

    def forward(self, x):
        up = self._up_pooling(x, 2)
        main = self.bn2(self.conv2(self.relu(self.bn1(self.conv1(up)))))
        shortcut = self.sc_bn1(self.sc_conv1(up))
        return self.relu(main + shortcut)
class Gudi_UpProj_Block_Cat(nn.Module):
    """Residual up-projection block that concatenates the (resized) left and
    right images into the main branch before the second convolution."""

    def __init__(self, in_channels, out_channels, oheight=0, owidth=0):
        super(Gudi_UpProj_Block_Cat, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=2, dilation=2, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # +6 input channels: 3-channel left and right images are concatenated
        self.conv1_1 = nn.Conv2d(out_channels+6, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.sc_conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=2, dilation=2, bias=False)
        self.sc_bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.oheight = oheight
        self.owidth = owidth

    def _up_pooling(self, x, scale, mode='bilinear', oheight=0, owidth=0):
        """Upsample by `scale`: plain bilinear by default, otherwise a sparse
        nearest-neighbour scheme keeping one pixel per 2x2 cell."""
        if mode == 'bilinear':
            return nn.functional.interpolate(x, scale_factor=scale, mode='bilinear', align_corners=False)
        up = nn.Upsample(scale_factor=scale, mode='nearest')(x)
        if oheight != 0 and owidth != 0:
            up = up[:, :, 0:oheight, 0:owidth]
        mask = torch.zeros_like(up)
        for row in range(0, oheight, 2):
            for col in range(0, owidth, 2):
                mask[:, :, row, col] = 1
        return torch.mul(mask, up)

    def forward(self, x, left, right, ratio=0):
        up = self._up_pooling(x, 2)
        # bring both stereo views to the upsampled spatial size
        target = up.size()[2:]
        left = F.interpolate(left, target, mode='bilinear', align_corners=False)
        right = F.interpolate(right, target, mode='bilinear', align_corners=False)
        main = self.relu(self.bn1(self.conv1(up)))
        main = torch.cat((main, left, right), 1)
        main = self.relu(self.bn1_1(self.conv1_1(main)))
        main = self.bn2(self.conv2(main))
        shortcut = self.sc_bn1(self.sc_conv1(up))
        return self.relu(main + shortcut)
class OriginalGudi_UpProj_Block_Cat(nn.Module):
    """Original up-projection-with-concat block: sparse x2 up-pooling, a
    side input concatenated into the main branch, residual shortcut."""

    def __init__(self, in_channels, out_channels, oheight=0, owidth=0):
        super(OriginalGudi_UpProj_Block_Cat, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=1, padding=2, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channels)
        # side input doubles the channel count before conv1_1
        self.conv1_1 = nn.Conv2d(out_channels*2, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1_1 = nn.BatchNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channels)
        self.sc_conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=5, stride=1, padding=2, bias=False)
        self.sc_bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.oheight = oheight
        self.owidth = owidth

    def _up_pooling(self, x, scale):
        """Nearest upsample, optional crop, then keep only the top-left pixel
        of every 2x2 cell.

        NOTE(review): with oheight/owidth left at 0 the mask loops never run
        and the result is all zeros — confirm that is intended.
        """
        up = nn.Upsample(scale_factor=scale, mode='nearest')(x)
        if self.oheight != 0 and self.owidth != 0:
            up = up[:, :, 0:self.oheight, 0:self.owidth]
        mask = torch.zeros_like(up)
        for row in range(0, self.oheight, 2):
            for col in range(0, self.owidth, 2):
                mask[:, :, row, col] = 1
        return torch.mul(mask, up)

    def forward(self, x, side_input):
        up = self._up_pooling(x, 2)
        main = self.relu(self.bn1(self.conv1(up)))
        main = torch.cat((main, side_input), 1)
        main = self.relu(self.bn1_1(self.conv1_1(main)))
        main = self.bn2(self.conv2(main))
        shortcut = self.sc_bn1(self.sc_conv1(up))
        return self.relu(main + shortcut)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_dmcrypt
----------------------------------
Tests for `dmcrypt` module.
"""
import base64
from unittest import mock
from vaultlocker import dmcrypt
from vaultlocker.tests.unit import base
class TestDMCrypt(base.TestCase):
    """Unit tests for the dmcrypt wrapper: each test mocks out the module's
    subprocess/os dependency and asserts the exact external call made."""

    @mock.patch.object(dmcrypt, 'subprocess')
    def test_luks_format(self, _subprocess):
        """luks_format issues a batch-mode cryptsetup luksFormat with the key on stdin."""
        dmcrypt.luks_format('mykey', '/dev/sdb', 'test-uuid')
        _subprocess.check_output.assert_called_once_with(
            ['cryptsetup',
             '--batch-mode',
             '--uuid', 'test-uuid',
             '--key-file', '-',
             'luksFormat', '/dev/sdb'],
            input='mykey'.encode('UTF-8')
        )
    @mock.patch.object(dmcrypt, 'subprocess')
    def test_luks_open(self, _subprocess):
        """luks_open opens the device by UUID as crypt-<uuid>, key on stdin."""
        dmcrypt.luks_open('mykey', 'test-uuid')
        _subprocess.check_output.assert_called_once_with(
            ['cryptsetup',
             '--batch-mode',
             '--key-file', '-',
             'open', 'UUID=test-uuid', 'crypt-test-uuid',
             '--type', 'luks'],
            input='mykey'.encode('UTF-8')
        )
    @mock.patch.object(dmcrypt, 'os')
    def test_generate_key(self, _os):
        """generate_key base64-encodes KEY_SIZE/8 bytes of urandom entropy."""
        _key = b'randomdatastringfromentropy'
        _os.urandom.return_value = _key
        self.assertEqual(dmcrypt.generate_key(),
                         base64.b64encode(_key).decode('UTF-8'))
        # KEY_SIZE / 8 is a float under true division; the mock's == check
        # still matches an int argument (e.g. 512 == 512.0)
        _os.urandom.assert_called_with(dmcrypt.KEY_SIZE / 8)
    @mock.patch.object(dmcrypt, 'subprocess')
    def test_udevadm_rescan(self, _subprocess):
        """udevadm_rescan re-triggers an 'add' event for the named device."""
        dmcrypt.udevadm_rescan('/dev/vdb')
        _subprocess.check_output.assert_called_once_with(
            ['udevadm',
             'trigger',
             '--name-match=/dev/vdb',
             '--action=add']
        )
    @mock.patch.object(dmcrypt, 'subprocess')
    def test_udevadm_settle(self, _subprocess):
        """udevadm_settle waits until the by-uuid symlink appears."""
        dmcrypt.udevadm_settle('myuuid')
        _subprocess.check_output.assert_called_once_with(
            ['udevadm',
             'settle',
             '--exit-if-exists=/dev/disk/by-uuid/myuuid']
        )
|
nilq/baby-python
|
python
|
#
# @lc app=leetcode id=1022 lang=python3
#
# [1022] Sum of Root To Leaf Binary Numbers
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """LC 1022: sum, over all root-to-leaf paths, of the binary number
    formed by the node values (each node holds 0 or 1)."""

    def sumRootToLeaf(self, root: "TreeNode") -> int:
        # "TreeNode" is quoted so the annotation does not need the class to
        # exist at import time (the original bare name raised NameError when
        # the LeetCode-provided definition was absent).
        if not root:
            return 0
        self.bins = []
        self.finder(root, '')
        # each collected path is a binary string, e.g. '101' -> 5
        return sum(int(path, 2) for path in self.bins)

    def finder(self, root, path):
        """DFS accumulating the bit string; record it at each leaf."""
        path = path + str(root.val)
        if not root.left and not root.right:
            self.bins.append(path)
            return
        if root.left:
            self.finder(root.left, path)
        if root.right:
            self.finder(root.right, path)
# @lc code=end
|
nilq/baby-python
|
python
|
"""Setup script of django-blog-zinnia"""
from setuptools import find_packages
from setuptools import setup
import zinnia
setup(
    # pinned git forks of dependencies whose PyPI releases are unmaintained
    dependency_links=[
        "git+https://github.com/arrobalytics/django-tagging.git@027eb90c88ad2d4aead4f50bbbd8d6f0b1678954#egg=django-tagging",
        "git+https://github.com/arrobalytics/django-xmlrpc.git@6cf59c555b207de7ecec75ac962751e8245cf8c9#egg=django-xmlrpc",
        "git+https://github.com/arrobalytics/mots-vides.git@eaeccf73bdb415d0c5559ccd74de360b37a2bbac#egg=mots-vides",
    ],
    name="django-blog-zinnia",
    # version/author metadata is sourced from the zinnia package itself
    version=zinnia.__version__,
    description="A clear and powerful weblog application powered with Django",
    long_description="\n".join([open("README.rst").read(), open("CHANGELOG").read()]),
    keywords="django, blog, weblog, zinnia, post, news",
    author=zinnia.__author__,
    author_email=zinnia.__email__,
    url=zinnia.__url__,
    # ship every package except the demo project
    packages=find_packages(exclude=["demo"]),
    classifiers=[
        "Framework :: Django",
        "Development Status :: 5 - Production/Stable",
        "Environment :: Web Environment",
        "Programming Language :: Python :: 3",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: BSD License",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    license=zinnia.__license__,
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        "asgiref>=3.4.1; python_version >= '3.6'",
        "beautifulsoup4>=4.10.0",
        "django>=2.2",
        "django-contrib-comments>=2.1.0",
        "django-js-asset>=1.2.2",
        "django-mptt>=0.13.4",
        "html5lib>=1.1; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3, 3.4'",
        "importlib-metadata>=4.9.0; python_version < '3.10'",
        "markdown>=3.3.6",
        "pillow>=8.4.0",
        "pyparsing>=3.0.6",
        "regex>=2021.11.10",
        "six>=1.16.0; python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
        "soupsieve>=2.3.1; python_version >= '3.6'",
        "sqlparse>=0.4.2; python_version >= '3.5'",
        "textile>=4.0.2",
        "webencodings>=0.5.1",
        "zipp>=3.6.0; python_version >= '3.6'",
    ],
)
|
nilq/baby-python
|
python
|
from numbers import Number
from timegraph.drawing.plotter import Plotter
class Drawing:
    """Turns a database response into a plotted time-series graph."""

    def __init__(self):
        self.plotter = Plotter()

    def create_graph(self, title, db_response):
        """Extract numeric values from *db_response* and plot them.

        NOTE(review): *title* is currently unused — kept for interface
        compatibility with callers.
        """
        value_list = self.get_value_list(db_response.get_points())
        self.plotter.plot_timeseries(value_list)

    def get_value_list(self, points):
        """Collect every numeric, non-'time' field value from *points*.

        Each point is a mapping; the 'time' key is skipped.  The explicit
        None check of the original was redundant: isinstance(None, Number)
        is already False.
        """
        return [
            point[key]
            for point in points
            for key in point
            if key != 'time' and isinstance(point[key], Number)
        ]

    def print_graph(self, lines):
        """Print each rendered graph line to stdout."""
        for line in lines:
            print(line)
class DrawingException(Exception):
    """Drawing error carrying a numeric *code* alongside its *message*."""

    def __init__(self, code, message):
        self.code = code
        self.message = message
        # pass both through so Exception.args stays (code, message)
        super().__init__(code, message)
|
nilq/baby-python
|
python
|
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """Collect the values of a singly-linked list in reverse order.

    Despite its name, ``reverseList`` does not relink any nodes: it
    returns a plain Python list of node values, last node first,
    accumulated in ``self.res`` by a divide-and-conquer walk
    (``printnode``) that visits the second half of each range before
    the first half.
    """
    def __init__(self):
        # Accumulator for node values, filled in reverse order.
        self.res=[]
    def printnode(self,start,end):
        # Append the values of the nodes in [start, end) to self.res in
        # reverse order.  `end` is an exclusive sentinel (a node or None).
        if start==end:
            return
        if start.next==end:
            # Base case: exactly one node in the range.
            # deal with end point the last element but not none
            #if end and not end.next:
            #    self.res.append(end.val)
            self.res.append(start.val)
            return
        if start.next.next==end:
            # Base case: exactly two nodes -- emit them in reverse.
            # deal with end point the last element but not none
            #if end and not end.next:
            #    self.res.append(end.val)
            self.res.append(start.next.val)
            self.res.append(start.val)
            return
        # Locate the midpoint with slow/fast pointers, then recurse on
        # [slow, end) before [start, slow) so the later half's values
        # land in self.res first.
        slow=start
        fast=start
        while fast!=end:
            slow=slow.next
            fast=fast.next.next if fast.next!=end else end
        #print start.val,end.val,slow.val,fast.val
        self.printnode(slow,fast)
        self.printnode(start,slow)
    def reverseList(self, head):
        """
        :type head: ListNode
        :rtype: ListNode
        """
        # NOTE(review): actually returns list[int] (self.res), not a
        # ListNode -- the docstring above follows the original template.
        if not head:
            return self.res
        if not head.next:
            self.res.append(head.val)
            return self.res
        # Split at the midpoint; emit the second half, then the first.
        slow=head
        fast=head
        while fast:
            slow=slow.next
            fast=fast.next.next if fast.next else None
        #print slow.val,fast.val
        self.printnode(slow,fast)
        self.printnode(head,slow)
        return self.res
|
nilq/baby-python
|
python
|
from django.shortcuts import render
# Create your views here.
def about_view(request):
    """Serve the static About page."""
    template = 'about/about.html'
    return render(request, template)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
v13 model
* Input: v12_im
Author: Kohei <i@ho.lc>
"""
from logging import getLogger, Formatter, StreamHandler, INFO, FileHandler
from pathlib import Path
import subprocess
import glob
import math
import sys
import json
import re
import warnings
import scipy
import tqdm
import click
import tables as tb
import pandas as pd
import numpy as np
from keras.models import Model
from keras.engine.topology import merge as merge_l
from keras.layers import (
Input, Convolution2D, MaxPooling2D, UpSampling2D,
Reshape, core, Dropout,
Activation, BatchNormalization)
from keras.optimizers import Adam, SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping, History
from keras import backend as K
import skimage.draw
import rasterio
import rasterio.features
import shapely.wkt
import shapely.ops
import shapely.geometry
# ---------------------------------------------------------
# Model identity and tiling geometry
MODEL_NAME = 'v13'
ORIGINAL_SIZE = 650  # source images are 650x650 px
INPUT_SIZE = 256     # network input tile size
# 2 * 197 + 256 = 650: a 3x3 grid of overlapping 256px tiles
# exactly covers one 650px image.
STRIDE_SZ = 197
BASE_DIR = "/data/train"
BASE_TEST_DIR = "/data/test"
WORKING_DIR = "/data/working"
# Preprocessed images come from the v12 pipeline; image lists from v5.
IMAGE_DIR = "/data/working/images/{}".format('v12')
V5_IMAGE_DIR = "/data/working/images/{}".format('v5')
# ---------------------------------------------------------
# Parameters
MIN_POLYGON_AREA = 30  # drop detected polygons smaller than this (px^2)
# ---------------------------------------------------------
# Input files
FMT_TRAIN_SUMMARY_PATH = str(
    Path(BASE_DIR) /
    Path("{prefix:s}_Train/") /
    Path("summaryData/{prefix:s}_Train_Building_Solutions.csv"))
FMT_TRAIN_RGB_IMAGE_PATH = str(
    Path(BASE_DIR) /
    Path("{prefix:s}_Train/") /
    Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TEST_RGB_IMAGE_PATH = str(
    Path(BASE_TEST_DIR) /
    Path("{prefix:s}_Test/") /
    Path("RGB-PanSharpen/RGB-PanSharpen_{image_id:s}.tif"))
FMT_TRAIN_MSPEC_IMAGE_PATH = str(
    Path(BASE_DIR) /
    Path("{prefix:s}_Train/") /
    Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
FMT_TEST_MSPEC_IMAGE_PATH = str(
    Path(BASE_TEST_DIR) /
    Path("{prefix:s}_Test/") /
    Path("MUL-PanSharpen/MUL-PanSharpen_{image_id:s}.tif"))
# ---------------------------------------------------------
# Preprocessing result (per-band clipping thresholds)
FMT_RGB_BANDCUT_TH_PATH = IMAGE_DIR + "/rgb_bandcut.csv"
FMT_MUL_BANDCUT_TH_PATH = IMAGE_DIR + "/mul_bandcut.csv"
# ---------------------------------------------------------
# Image list, image container and mask container (HDF5 stores)
FMT_VALTRAIN_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_valtrain_ImageId.csv"
FMT_VALTEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_valtest_ImageId.csv"
FMT_VALTRAIN_IM_STORE = IMAGE_DIR + "/valtrain_{}_im.h5"
FMT_VALTEST_IM_STORE = IMAGE_DIR + "/valtest_{}_im.h5"
FMT_VALTRAIN_MASK_STORE = IMAGE_DIR + "/valtrain_{}_mask.h5"
FMT_VALTEST_MASK_STORE = IMAGE_DIR + "/valtest_{}_mask.h5"
FMT_VALTRAIN_MUL_STORE = IMAGE_DIR + "/valtrain_{}_mul.h5"
FMT_VALTEST_MUL_STORE = IMAGE_DIR + "/valtest_{}_mul.h5"
FMT_TRAIN_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_train_ImageId.csv"
FMT_TEST_IMAGELIST_PATH = V5_IMAGE_DIR + "/{prefix:s}_test_ImageId.csv"
FMT_TRAIN_IM_STORE = IMAGE_DIR + "/train_{}_im.h5"
FMT_TEST_IM_STORE = IMAGE_DIR + "/test_{}_im.h5"
FMT_TRAIN_MASK_STORE = IMAGE_DIR + "/train_{}_mask.h5"
FMT_TRAIN_MUL_STORE = IMAGE_DIR + "/train_{}_mul.h5"
FMT_TEST_MUL_STORE = IMAGE_DIR + "/test_{}_mul.h5"
FMT_MULMEAN = IMAGE_DIR + "/{}_mulmean.h5"
# ---------------------------------------------------------
# Model files
MODEL_DIR = "/data/working/models/{}".format(MODEL_NAME)
FMT_VALMODEL_PATH = MODEL_DIR + "/{}_val_weights.h5"
FMT_FULLMODEL_PATH = MODEL_DIR + "/{}_full_weights.h5"
FMT_VALMODEL_HIST = MODEL_DIR + "/{}_val_hist.csv"
FMT_VALMODEL_EVALHIST = MODEL_DIR + "/{}_val_evalhist.csv"
FMT_VALMODEL_EVALTHHIST = MODEL_DIR + "/{}_val_evalhist_th.csv"
# ---------------------------------------------------------
# Prediction & polygon result
FMT_TESTPRED_PATH = MODEL_DIR + "/{}_pred.h5"
FMT_VALTESTPRED_PATH = MODEL_DIR + "/{}_eval_pred.h5"
FMT_VALTESTPOLY_PATH = MODEL_DIR + "/{}_eval_poly.csv"
FMT_VALTESTTRUTH_PATH = MODEL_DIR + "/{}_eval_poly_truth.csv"
FMT_VALTESTPOLY_OVALL_PATH = MODEL_DIR + "/eval_poly.csv"
FMT_VALTESTTRUTH_OVALL_PATH = MODEL_DIR + "/eval_poly_truth.csv"
FMT_TESTPOLY_PATH = MODEL_DIR + "/{}_poly.csv"
FN_SOLUTION_CSV = "data/output/{}.csv".format(MODEL_NAME)
# ---------------------------------------------------------
# Model related files (others)
FMT_VALMODEL_LAST_PATH = MODEL_DIR + "/{}_val_weights_last.h5"
FMT_FULLMODEL_LAST_PATH = MODEL_DIR + "/{}_full_weights_last.h5"
# ---------------------------------------------------------
# Warnings and logging
warnings.simplefilter("ignore", UserWarning)
handler = StreamHandler()
handler.setLevel(INFO)
handler.setFormatter(Formatter('%(asctime)s %(levelname)s %(message)s'))
fh_handler = FileHandler(".{}.log".format(MODEL_NAME))
fh_handler.setFormatter(Formatter('%(asctime)s %(levelname)s %(message)s'))
logger = getLogger(__name__)
logger.setLevel(INFO)
if __name__ == '__main__':
    # Attach handlers only when run as a script so that importing this
    # module does not duplicate log output.
    logger.addHandler(handler)
    logger.addHandler(fh_handler)
# Fix seed for reproducibility
np.random.seed(1145141919)
def directory_name_to_area_id(datapath):
    """
    Directory name to AOI number

    Usage:
        >>> directory_name_to_area_id("/data/test/AOI_2_Vegas")
        2
    """
    dir_name = Path(datapath).name
    known_areas = (
        ('AOI_2_Vegas', 2),
        ('AOI_3_Paris', 3),
        ('AOI_4_Shanghai', 4),
        ('AOI_5_Khartoum', 5),
    )
    for area_prefix, area_id in known_areas:
        if dir_name.startswith(area_prefix):
            return area_id
    raise RuntimeError("Unsupported city id is given.")
def _remove_interiors(line):
    """Strip interior-ring coordinates from a CSV line holding a quoted WKT polygon.

    Keeps the text up to the first interior ring ('), (') and everything
    after the closing '))",'; lines without interiors pass through unchanged.
    """
    if "), (" not in line:
        return line
    head = line.split('), (')[0]
    tail = line.split('))",')[-1]
    return head + '))",' + tail
def _calc_fscore_per_aoi(area_id):
    """Score predicted polygons against ground truth with the official
    visualizer JAR and parse its stdout into a metrics dict.

    Returns a dict with keys: overall_fscore, tp, fp, fn, precision,
    recall, fscore.

    Raises:
        RuntimeError: if the visualizer output cannot be parsed.
    """
    prefix = area_id_to_prefix(area_id)
    truth_file = FMT_VALTESTTRUTH_PATH.format(prefix)
    poly_file = FMT_VALTESTPOLY_PATH.format(prefix)
    cmd = [
        'java',
        '-jar',
        '/root/visualizer-2.0/visualizer.jar',
        '-truth',
        truth_file,
        '-solution',
        poly_file,
        '-no-gui',
        '-band-triplets',
        '/root/visualizer-2.0/data/band-triplets.txt',
        '-image-dir',
        'pass',
    ]
    proc = subprocess.Popen(
        cmd,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_data, stderr_data = proc.communicate()
    # Only the last 10 lines of output carry the score summary.
    lines = stdout_data.decode('utf8').split('\n')[-10:]
    """
    Overall F-score : 0.85029

    AOI_2_Vegas:
      TP       : 27827
      FP       : 4999
      FN       : 4800
      Precision: 0.847712
      Recall   : 0.852883
      F-score  : 0.85029
    """
    if stdout_data.decode('utf8').strip().endswith("Overall F-score : 0"):
        # Degenerate case: the visualizer reports a flat zero score.
        overall_fscore = 0
        tp = 0
        fp = 0
        fn = 0
        precision = 0
        recall = 0
        fscore = 0
    elif len(lines) > 0 and lines[0].startswith("Overall F-score : "):
        # Sanity-check the expected layout before parsing numbers out.
        assert lines[0].startswith("Overall F-score : ")
        assert lines[2].startswith("AOI_")
        assert lines[3].strip().startswith("TP")
        assert lines[4].strip().startswith("FP")
        assert lines[5].strip().startswith("FN")
        assert lines[6].strip().startswith("Precision")
        assert lines[7].strip().startswith("Recall")
        assert lines[8].strip().startswith("F-score")
        overall_fscore = float(re.findall(r"([\d\.]+)", lines[0])[0])
        tp = int(re.findall(r"(\d+)", lines[3])[0])
        fp = int(re.findall(r"(\d+)", lines[4])[0])
        fn = int(re.findall(r"(\d+)", lines[5])[0])
        precision = float(re.findall(r"([\d\.]+)", lines[6])[0])
        recall = float(re.findall(r"([\d\.]+)", lines[7])[0])
        fscore = float(re.findall(r"([\d\.]+)", lines[8])[0])
    else:
        # logger.warn is a deprecated alias; use logger.warning.
        logger.warning("Unexpected data >>> " + stdout_data.decode('utf8'))
        raise RuntimeError("Unsupported format")
    return {
        'overall_fscore': overall_fscore,
        'tp': tp,
        'fp': fp,
        'fn': fn,
        'precision': precision,
        'recall': recall,
        'fscore': fscore,
    }
def prefix_to_area_id(prefix):
    """Return the AOI number for a dataset prefix (e.g. 'AOI_2_Vegas' -> 2).

    Raises:
        KeyError: if *prefix* is not a known AOI prefix.
    """
    area_dict = {
        'AOI_1_Rio': 1,
        'AOI_2_Vegas': 2,
        'AOI_3_Paris': 3,
        'AOI_4_Shanghai': 4,
        'AOI_5_Khartoum': 5,
    }
    # Bug fix: the original indexed with the undefined name `area_id`,
    # raising NameError on every call.
    return area_dict[prefix]
def area_id_to_prefix(area_id):
    """Return the dataset prefix for *area_id* (e.g. 2 -> 'AOI_2_Vegas')."""
    prefixes = {
        1: 'AOI_1_Rio',
        2: 'AOI_2_Vegas',
        3: 'AOI_3_Paris',
        4: 'AOI_4_Shanghai',
        5: 'AOI_5_Khartoum',
    }
    return prefixes[area_id]
# ---------------------------------------------------------
# main
def _get_model_parameter(area_id):
    """Load the best (epoch, min-area) pair from the threshold-eval history CSV.

    The row with the highest fscore wins; returns a dict with keys
    'fn_epoch' and 'min_poly_area'.
    """
    prefix = area_id_to_prefix(area_id)
    hist_path = FMT_VALMODEL_EVALTHHIST.format(prefix)
    history = pd.read_csv(hist_path)
    best = history.sort_values(by='fscore', ascending=False).iloc[0]
    return {
        'fn_epoch': int(best['zero_base_epoch']),
        'min_poly_area': int(best['min_area_th']),
    }
def _internal_test_predict_best_param(area_id,
                                      save_pred=True):
    """Predict building masks for the test set with the best stored params.

    Loads the validation-selected epoch's weights, runs prediction over
    all test image slices (9 overlapping tiles per image), optionally
    saves the raw predictions to HDF5, and returns the prediction array.
    """
    prefix = area_id_to_prefix(area_id)
    param = _get_model_parameter(area_id)
    epoch = param['fn_epoch']
    min_th = param['min_poly_area']  # NOTE(review): unused here -- confirm intent
    # Prediction phase
    logger.info("Prediction phase: {}".format(prefix))
    X_mean = get_mul_mean_image(area_id)
    # Load model weights
    # Predict and Save prediction result
    fn = FMT_TESTPRED_PATH.format(prefix)
    fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
    fn_model = fn_model.format(epoch=epoch)
    model = get_unet()
    model.load_weights(fn_model)
    fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    # 9 slices per image, hence val_samples = len(df_test) * 9.
    y_pred = model.predict_generator(
        generate_test_batch(
            area_id,
            batch_size=64,
            immean=X_mean,
            enable_tqdm=True,
        ),
        val_samples=len(df_test) * 9,
    )
    del model
    # Save prediction result
    if save_pred:
        with tb.open_file(fn, 'w') as f:
            atom = tb.Atom.from_dtype(y_pred.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root, 'pred', atom, y_pred.shape,
                                 filters=filters)
            ds[:] = y_pred
    return y_pred
def _internal_test(area_id):
    """Run test-set prediction with the best stored parameters and write
    the polygon solution CSV for *area_id*.

    Fixes vs. original: ``min_th`` was referenced without ever being
    defined in this scope (NameError); it is now read from the tuned
    model parameters.  The prediction is also saved (save_pred=True) so
    the subsequent HDF5 reload reads the freshly written result.
    """
    prefix = area_id_to_prefix(area_id)
    # Tuned postprocessing threshold (previously an undefined name).
    param = _get_model_parameter(area_id)
    min_th = param['min_poly_area']
    y_pred = _internal_test_predict_best_param(area_id, save_pred=True)
    # Postprocessing phase
    logger.info("Postprocessing phase")
    fn_test = FMT_TEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    fn = FMT_TESTPRED_PATH.format(prefix)
    with tb.open_file(fn, 'r') as f:
        y_pred = np.array(f.get_node('/pred'))
    fn_out = FMT_TESTPOLY_PATH.format(prefix)
    with open(fn_out, 'w') as f:
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        for idx, image_id in enumerate(df_test.index.tolist()):
            # Re-assemble the 9 overlapping 256px tiles into one
            # 650x650 probability map, averaging the overlaps.
            pred_values = np.zeros((650, 650))
            pred_count = np.zeros((650, 650))
            for slice_pos in range(9):
                slice_idx = idx * 9 + slice_pos
                pos_j = int(math.floor(slice_pos / 3.0))
                pos_i = int(slice_pos % 3)
                x0 = STRIDE_SZ * pos_i
                y0 = STRIDE_SZ * pos_j
                pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
                    y_pred[slice_idx][0]
                )
                pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
            pred_values = pred_values / pred_count
            df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
            if len(df_poly) > 0:
                for i, row in df_poly.iterrows():
                    line = "{},{},\"{}\",{:.6f}\n".format(
                        image_id,
                        row.bid,
                        row.wkt,
                        row.area_ratio)
                    line = _remove_interiors(line)
                    f.write(line)
            else:
                f.write("{},{},{},0\n".format(
                    image_id,
                    -1,
                    "POLYGON EMPTY"))
def _internal_validate_predict_best_param(area_id,
                                          enable_tqdm=False):
    """Return prediction probabilities on valtest using the best parameters.

    The prediction array is not persisted (used from the ensemble model).
    """
    best = _get_model_parameter(area_id)
    return _internal_validate_predict(
        area_id,
        epoch=best['fn_epoch'],
        save_pred=False,
        enable_tqdm=enable_tqdm)
def _internal_validate_predict(area_id,
                               epoch=3,
                               save_pred=True,
                               enable_tqdm=False):
    """Predict building masks on the valtest split at a given epoch.

    Loads the weights checkpointed for *epoch*, predicts over all
    valtest slices (9 per image), optionally stores the raw prediction
    array to HDF5, and returns it.
    """
    prefix = area_id_to_prefix(area_id)
    X_mean = get_mul_mean_image(area_id)
    # Load model weights
    # Predict and Save prediction result
    fn_model = FMT_VALMODEL_PATH.format(prefix + '_{epoch:02d}')
    fn_model = fn_model.format(epoch=epoch)
    model = get_unet()
    model.load_weights(fn_model)
    fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    # 9 slices per image, hence val_samples = len(df_test) * 9.
    y_pred = model.predict_generator(
        generate_valtest_batch(
            area_id,
            batch_size=64,
            immean=X_mean,
            enable_tqdm=enable_tqdm,
        ),
        val_samples=len(df_test) * 9,
    )
    del model
    # Save prediction result
    if save_pred:
        fn = FMT_VALTESTPRED_PATH.format(prefix)
        with tb.open_file(fn, 'w') as f:
            atom = tb.Atom.from_dtype(y_pred.dtype)
            filters = tb.Filters(complib='blosc', complevel=9)
            ds = f.create_carray(f.root,
                                 'pred',
                                 atom,
                                 y_pred.shape,
                                 filters=filters)
            ds[:] = y_pred
    return y_pred
def _internal_validate_fscore_wo_pred_file(area_id,
                                           epoch=3,
                                           min_th=MIN_POLYGON_AREA,
                                           enable_tqdm=False):
    """Predict on valtest in memory and write polygon + truth CSVs.

    Unlike _internal_validate_fscore, the raw prediction array is kept
    in memory only and never persisted to disk.
    """
    prefix = area_id_to_prefix(area_id)
    # Prediction phase
    logger.info("Prediction phase")
    y_pred = _internal_validate_predict(
        area_id,
        save_pred=False,
        epoch=epoch,
        enable_tqdm=enable_tqdm)
    # Postprocessing phase: re-assemble the 9 overlapping tiles into one
    # 650x650 probability map per image, then vectorize to polygons.
    logger.info("Postprocessing phase")
    fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
    with open(fn_out, 'w') as f:
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        test_list = df_test.index.tolist()
        iterator = enumerate(test_list)
        for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
            pred_values = np.zeros((650, 650))
            pred_count = np.zeros((650, 650))
            for slice_pos in range(9):
                slice_idx = idx * 9 + slice_pos
                pos_j = int(math.floor(slice_pos / 3.0))
                pos_i = int(slice_pos % 3)
                x0 = STRIDE_SZ * pos_i
                y0 = STRIDE_SZ * pos_j
                pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
                    y_pred[slice_idx][0]
                )
                pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
            # Average the overlapping tile predictions.
            pred_values = pred_values / pred_count
            df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
            if len(df_poly) > 0:
                for i, row in df_poly.iterrows():
                    line = "{},{},\"{}\",{:.6f}\n".format(
                        image_id,
                        row.bid,
                        row.wkt,
                        row.area_ratio)
                    line = _remove_interiors(line)
                    f.write(line)
            else:
                f.write("{},{},{},0\n".format(
                    image_id,
                    -1,
                    "POLYGON EMPTY"))
    # ------------------------
    # Validation solution file: ground-truth polygons restricted to the
    # valtest image ids, with confidence fixed at 1.0.
    logger.info("Validation solution file")
    fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
    df_true = pd.read_csv(fn_true)
    # # Remove prefix "PAN_"
    # df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
    fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test)
    df_test_image_ids = df_test.ImageId.unique()
    fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
    with open(fn_out, 'w') as f:
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
        for idx, r in df_true.iterrows():
            f.write("{},{},\"{}\",{:.6f}\n".format(
                r.ImageId,
                r.BuildingId,
                r.PolygonWKT_Pix,
                1.0))
def _internal_validate_fscore(area_id,
                              epoch=3,
                              predict=True,
                              min_th=MIN_POLYGON_AREA,
                              enable_tqdm=False):
    """Predict on valtest (optionally), then write polygon + truth CSVs.

    When *predict* is False, reuses the prediction HDF5 written by an
    earlier _internal_validate_predict call.
    """
    prefix = area_id_to_prefix(area_id)
    # Prediction phase
    logger.info("Prediction phase")
    if predict:
        _internal_validate_predict(
            area_id,
            epoch=epoch,
            enable_tqdm=enable_tqdm)
    # Postprocessing phase: load saved predictions, re-assemble the 9
    # overlapping tiles into one 650x650 map per image, vectorize.
    logger.info("Postprocessing phase")
    fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test, index_col='ImageId')
    fn = FMT_VALTESTPRED_PATH.format(prefix)
    with tb.open_file(fn, 'r') as f:
        y_pred = np.array(f.get_node('/pred'))
    fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
    with open(fn_out, 'w') as f:
        f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
        test_list = df_test.index.tolist()
        iterator = enumerate(test_list)
        for idx, image_id in tqdm.tqdm(iterator, total=len(test_list)):
            pred_values = np.zeros((650, 650))
            pred_count = np.zeros((650, 650))
            for slice_pos in range(9):
                slice_idx = idx * 9 + slice_pos
                pos_j = int(math.floor(slice_pos / 3.0))
                pos_i = int(slice_pos % 3)
                x0 = STRIDE_SZ * pos_i
                y0 = STRIDE_SZ * pos_j
                pred_values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += (
                    y_pred[slice_idx][0]
                )
                pred_count[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE] += 1
            # Average the overlapping tile predictions.
            pred_values = pred_values / pred_count
            df_poly = mask_to_poly(pred_values, min_polygon_area_th=min_th)
            if len(df_poly) > 0:
                for i, row in df_poly.iterrows():
                    line = "{},{},\"{}\",{:.6f}\n".format(
                        image_id,
                        row.bid,
                        row.wkt,
                        row.area_ratio)
                    line = _remove_interiors(line)
                    f.write(line)
            else:
                f.write("{},{},{},0\n".format(
                    image_id,
                    -1,
                    "POLYGON EMPTY"))
    # ------------------------
    # Validation solution file: ground-truth polygons restricted to the
    # valtest image ids, with confidence fixed at 1.0.
    logger.info("Validation solution file")
    # if not Path(FMT_VALTESTTRUTH_PATH.format(prefix)).exists():
    if True:
        fn_true = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
        df_true = pd.read_csv(fn_true)
        # # Remove prefix "PAN_"
        # df_true.loc[:, 'ImageId'] = df_true.ImageId.str[4:]
        fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
        df_test = pd.read_csv(fn_test)
        df_test_image_ids = df_test.ImageId.unique()
        fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
        with open(fn_out, 'w') as f:
            f.write("ImageId,BuildingId,PolygonWKT_Pix,Confidence\n")
            df_true = df_true[df_true.ImageId.isin(df_test_image_ids)]
            for idx, r in df_true.iterrows():
                f.write("{},{},\"{}\",{:.6f}\n".format(
                    r.ImageId,
                    r.BuildingId,
                    r.PolygonWKT_Pix,
                    1.0))
def mask_to_poly(mask, min_polygon_area_th=MIN_POLYGON_AREA):
    """Vectorize a probability mask into building polygons.

    Thresholds the mask at 0.5, unions the rasterized shapes, drops
    polygons below *min_polygon_area_th*, and returns a DataFrame with
    columns: area_size, poly, wkt, bid (1-based id by descending area),
    area_ratio (area relative to the largest polygon).
    """
    mask = (mask > 0.5).astype(np.uint8)
    shapes = rasterio.features.shapes(mask.astype(np.int16), mask > 0)
    poly_list = []  # NOTE(review): unused -- kept for compatibility
    # NOTE(review): cascaded_union is deprecated in newer shapely in
    # favor of unary_union -- confirm the pinned shapely version.
    mp = shapely.ops.cascaded_union(
        shapely.geometry.MultiPolygon([
            shapely.geometry.shape(shape)
            for shape, value in shapes
        ]))
    # The union may collapse to a single Polygon; normalize to a frame.
    if isinstance(mp, shapely.geometry.Polygon):
        df = pd.DataFrame({
            'area_size': [mp.area],
            'poly': [mp],
        })
    else:
        df = pd.DataFrame({
            'area_size': [p.area for p in mp],
            'poly': [p for p in mp],
        })
    df = df[df.area_size > min_polygon_area_th].sort_values(
        by='area_size', ascending=False)
    df.loc[:, 'wkt'] = df.poly.apply(lambda x: shapely.wkt.dumps(
        x, rounding_precision=0))
    df.loc[:, 'bid'] = list(range(1, len(df) + 1))
    df.loc[:, 'area_ratio'] = df.area_size / df.area_size.max()
    return df
def jaccard_coef(y_true, y_pred):
    """Smoothed soft Jaccard index, averaged over the channel axis."""
    eps = 1e-12
    inter = K.sum(y_true * y_pred, axis=[0, -1, -2])
    union = K.sum(y_true + y_pred, axis=[0, -1, -2]) - inter
    return K.mean((inter + eps) / (union + eps))
def jaccard_coef_int(y_true, y_pred):
    """Smoothed Jaccard index with predictions rounded to {0, 1} first."""
    eps = 1e-12
    y_bin = K.round(K.clip(y_pred, 0, 1))
    inter = K.sum(y_true * y_bin, axis=[0, -1, -2])
    union = K.sum(y_true + y_bin, axis=[0, -1, -2]) - inter
    return K.mean((inter + eps) / (union + eps))
def generate_test_batch(area_id,
                        batch_size=64,
                        immean=None,
                        enable_tqdm=False):
    """Yield (X, y) batches of test image slices forever.

    y is all-zeros (the test set has no labels); it exists only because
    the Keras generator API expects a target array.  If *immean* is
    given it is subtracted from each batch (mean-centering).
    """
    prefix = area_id_to_prefix(area_id)
    df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
    fn_im = FMT_TEST_MUL_STORE.format(prefix)
    slice_id_list = []
    for idx, row in df_test.iterrows():
        for slice_pos in range(9):
            # HDF5 node ids are "<ImageId>_<0..8>", one per 3x3 tile.
            slice_id = row.ImageId + '_' + str(slice_pos)
            slice_id_list.append(slice_id)
    if enable_tqdm:
        pbar = tqdm.tqdm(total=len(slice_id_list))
    while 1:
        total_sz = len(slice_id_list)
        n_batch = int(math.floor(total_sz / batch_size) + 1)
        with tb.open_file(fn_im, 'r') as f_im:
            for i_batch in range(n_batch):
                target_slice_ids = slice_id_list[
                    i_batch*batch_size:(i_batch+1)*batch_size
                ]
                if len(target_slice_ids) == 0:
                    continue
                X_test = []
                y_test = []
                for slice_id in target_slice_ids:
                    im = np.array(f_im.get_node('/' + slice_id))
                    # Reorder axes (stored H,W,C -> C,H,W for the net).
                    im = np.swapaxes(im, 0, 2)
                    im = np.swapaxes(im, 1, 2)
                    X_test.append(im)
                    mask = np.zeros((INPUT_SIZE, INPUT_SIZE)).astype(np.uint8)
                    y_test.append(mask)
                X_test = np.array(X_test)
                y_test = np.array(y_test)
                y_test = y_test.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
                if immean is not None:
                    X_test = X_test - immean
                if enable_tqdm:
                    pbar.update(y_test.shape[0])
                yield (X_test, y_test)
    # NOTE(review): unreachable -- the while-loop above never exits.
    if enable_tqdm:
        pbar.close()
def generate_valtest_batch(area_id,
                           batch_size=8,
                           immean=None,
                           enable_tqdm=False):
    """Yield (X, y) batches of valtest image slices and binary masks forever.

    If *immean* is given it is subtracted from each image batch.
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix))
    fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
    fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
    slice_id_list = []
    for idx, row in df_train.iterrows():
        for slice_pos in range(9):
            # HDF5 node ids are "<ImageId>_<0..8>", one per 3x3 tile.
            slice_id = row.ImageId + '_' + str(slice_pos)
            slice_id_list.append(slice_id)
    if enable_tqdm:
        pbar = tqdm.tqdm(total=len(slice_id_list))
    while 1:
        total_sz = len(slice_id_list)
        n_batch = int(math.floor(total_sz / batch_size) + 1)
        with tb.open_file(fn_im, 'r') as f_im,\
                tb.open_file(fn_mask, 'r') as f_mask:
            for i_batch in range(n_batch):
                target_slice_ids = slice_id_list[
                    i_batch*batch_size:(i_batch+1)*batch_size
                ]
                if len(target_slice_ids) == 0:
                    continue
                X_train = []
                y_train = []
                for slice_id in target_slice_ids:
                    im = np.array(f_im.get_node('/' + slice_id))
                    # Reorder axes (stored H,W,C -> C,H,W for the net).
                    im = np.swapaxes(im, 0, 2)
                    im = np.swapaxes(im, 1, 2)
                    X_train.append(im)
                    mask = np.array(f_mask.get_node('/' + slice_id))
                    mask = (mask > 0).astype(np.uint8)
                    y_train.append(mask)
                X_train = np.array(X_train)
                y_train = np.array(y_train)
                y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
                if immean is not None:
                    X_train = X_train - immean
                if enable_tqdm:
                    pbar.update(y_train.shape[0])
                yield (X_train, y_train)
    # NOTE(review): unreachable -- the while-loop above never exits.
    if enable_tqdm:
        pbar.close()
def generate_valtrain_batch(area_id, batch_size=8, immean=None):
    """Yield shuffled (X, y) training batches from the valtrain split forever.

    Slice order is shuffled once up front; if *immean* is given it is
    subtracted from each image batch.
    """
    prefix = area_id_to_prefix(area_id)
    df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix))
    fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
    fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
    slice_id_list = []
    for idx, row in df_train.iterrows():
        for slice_pos in range(9):
            # HDF5 node ids are "<ImageId>_<0..8>", one per 3x3 tile.
            slice_id = row.ImageId + '_' + str(slice_pos)
            slice_id_list.append(slice_id)
    np.random.shuffle(slice_id_list)
    while 1:
        total_sz = len(slice_id_list)
        n_batch = int(math.floor(total_sz / batch_size) + 1)
        with tb.open_file(fn_im, 'r') as f_im,\
                tb.open_file(fn_mask, 'r') as f_mask:
            for i_batch in range(n_batch):
                target_slice_ids = slice_id_list[
                    i_batch*batch_size:(i_batch+1)*batch_size
                ]
                if len(target_slice_ids) == 0:
                    continue
                X_train = []
                y_train = []
                for slice_id in target_slice_ids:
                    im = np.array(f_im.get_node('/' + slice_id))
                    # Reorder axes (stored H,W,C -> C,H,W for the net).
                    im = np.swapaxes(im, 0, 2)
                    im = np.swapaxes(im, 1, 2)
                    X_train.append(im)
                    mask = np.array(f_mask.get_node('/' + slice_id))
                    mask = (mask > 0).astype(np.uint8)
                    y_train.append(mask)
                X_train = np.array(X_train)
                y_train = np.array(y_train)
                y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
                if immean is not None:
                    X_train = X_train - immean
                yield (X_train, y_train)
def get_unet():
    """Build and compile the U-Net model (channels-first, 8x256x256 input).

    Classic encoder/decoder with skip connections: four 2x2 max-pool
    downsampling stages (32->512 filters), mirrored upsampling stages
    with concatenation merges, and a 1x1 sigmoid head producing a
    single-channel mask.  Compiled with SGD + binary cross-entropy and
    Jaccard metrics.  Uses the Keras 1.x API (Convolution2D, merge).
    """
    conv_params = dict(activation='relu', border_mode='same')
    merge_params = dict(mode='concat', concat_axis=1)
    inputs = Input((8, 256, 256))
    conv1 = Convolution2D(32, 3, 3, **conv_params)(inputs)
    conv1 = Convolution2D(32, 3, 3, **conv_params)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Convolution2D(64, 3, 3, **conv_params)(pool1)
    conv2 = Convolution2D(64, 3, 3, **conv_params)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Convolution2D(128, 3, 3, **conv_params)(pool2)
    conv3 = Convolution2D(128, 3, 3, **conv_params)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = Convolution2D(256, 3, 3, **conv_params)(pool3)
    conv4 = Convolution2D(256, 3, 3, **conv_params)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv5 = Convolution2D(512, 3, 3, **conv_params)(pool4)
    conv5 = Convolution2D(512, 3, 3, **conv_params)(conv5)
    # Decoder: upsample and concatenate with the matching encoder stage.
    up6 = merge_l([UpSampling2D(size=(2, 2))(conv5), conv4], **merge_params)
    conv6 = Convolution2D(256, 3, 3, **conv_params)(up6)
    conv6 = Convolution2D(256, 3, 3, **conv_params)(conv6)
    up7 = merge_l([UpSampling2D(size=(2, 2))(conv6), conv3], **merge_params)
    conv7 = Convolution2D(128, 3, 3, **conv_params)(up7)
    conv7 = Convolution2D(128, 3, 3, **conv_params)(conv7)
    up8 = merge_l([UpSampling2D(size=(2, 2))(conv7), conv2], **merge_params)
    conv8 = Convolution2D(64, 3, 3, **conv_params)(up8)
    conv8 = Convolution2D(64, 3, 3, **conv_params)(conv8)
    up9 = merge_l([UpSampling2D(size=(2, 2))(conv8), conv1], **merge_params)
    conv9 = Convolution2D(32, 3, 3, **conv_params)(up9)
    conv9 = Convolution2D(32, 3, 3, **conv_params)(conv9)
    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
    optimizer = SGD(lr=0.01, momentum=0.9, nesterov=True)
    model = Model(input=inputs, output=conv10)
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy', jaccard_coef, jaccard_coef_int])
    return model
def get_mean_image(area_id):
    """Load the mean image for *area_id* from the immean HDF5 store.

    NOTE(review): FMT_IMMEAN is not defined anywhere in this module as
    shown -- calling this raises NameError unless it is defined
    elsewhere.  Confirm before use.
    """
    prefix = area_id_to_prefix(area_id)
    with tb.open_file(FMT_IMMEAN.format(prefix), 'r') as f:
        im_mean = np.array(f.get_node('/immean'))
    return im_mean
def get_mul_mean_image(area_id):
    """Load the multispectral mean image for *area_id* from its HDF5 store."""
    prefix = area_id_to_prefix(area_id)
    store_path = FMT_MULMEAN.format(prefix)
    with tb.open_file(store_path, 'r') as f:
        return np.array(f.get_node('/mulmean'))
def get_train_data(area_id):
    """Load all training slices and masks into memory.

    Returns (X_train, y_train): images as a (N, C, H, W) array and
    binary masks reshaped to (N, 1, INPUT_SIZE, INPUT_SIZE).
    """
    prefix = area_id_to_prefix(area_id)
    fn_train = FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix)
    df_train = pd.read_csv(fn_train)
    X_train = []
    fn_im = FMT_TRAIN_MUL_STORE.format(prefix)
    with tb.open_file(fn_im, 'r') as f:
        for idx, image_id in enumerate(df_train.ImageId.tolist()):
            for slice_pos in range(9):
                slice_id = image_id + '_' + str(slice_pos)
                im = np.array(f.get_node('/' + slice_id))
                # Reorder axes (stored H,W,C -> C,H,W for the net).
                im = np.swapaxes(im, 0, 2)
                im = np.swapaxes(im, 1, 2)
                X_train.append(im)
    X_train = np.array(X_train)
    y_train = []
    fn_mask = FMT_TRAIN_MASK_STORE.format(prefix)
    with tb.open_file(fn_mask, 'r') as f:
        for idx, image_id in enumerate(df_train.ImageId.tolist()):
            for slice_pos in range(9):
                slice_id = image_id + '_' + str(slice_pos)
                mask = np.array(f.get_node('/' + slice_id))
                mask = (mask > 0.5).astype(np.uint8)
                y_train.append(mask)
    y_train = np.array(y_train)
    y_train = y_train.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_train, y_train
def get_test_data(area_id):
    """Load all test image slices (9 per image) as a (N, C, H, W) array."""
    prefix = area_id_to_prefix(area_id)
    df_test = pd.read_csv(FMT_TEST_IMAGELIST_PATH.format(prefix=prefix))
    images = []
    with tb.open_file(FMT_TEST_MUL_STORE.format(prefix), 'r') as f:
        for image_id in df_test.ImageId.tolist():
            for slice_pos in range(9):
                node = f.get_node('/{}_{}'.format(image_id, slice_pos))
                im = np.array(node)
                # Reorder axes to channels-first.
                im = np.swapaxes(im, 0, 2)
                im = np.swapaxes(im, 1, 2)
                images.append(im)
    return np.array(images)
def get_valtest_data(area_id):
    """Load all valtest slices and masks into memory.

    Returns (X_val, y_val): images as a (N, C, H, W) array and binary
    masks reshaped to (N, 1, INPUT_SIZE, INPUT_SIZE).
    """
    prefix = area_id_to_prefix(area_id)
    fn_test = FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)
    df_test = pd.read_csv(fn_test)
    X_val = []
    fn_im = FMT_VALTEST_MUL_STORE.format(prefix)
    with tb.open_file(fn_im, 'r') as f:
        for idx, image_id in enumerate(df_test.ImageId.tolist()):
            for slice_pos in range(9):
                slice_id = image_id + '_' + str(slice_pos)
                im = np.array(f.get_node('/' + slice_id))
                # Reorder axes (stored H,W,C -> C,H,W for the net).
                im = np.swapaxes(im, 0, 2)
                im = np.swapaxes(im, 1, 2)
                X_val.append(im)
    X_val = np.array(X_val)
    y_val = []
    fn_mask = FMT_VALTEST_MASK_STORE.format(prefix)
    with tb.open_file(fn_mask, 'r') as f:
        for idx, image_id in enumerate(df_test.ImageId.tolist()):
            for slice_pos in range(9):
                slice_id = image_id + '_' + str(slice_pos)
                mask = np.array(f.get_node('/' + slice_id))
                mask = (mask > 0.5).astype(np.uint8)
                y_val.append(mask)
    y_val = np.array(y_val)
    y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def _get_valtrain_data_head(area_id):
    """Load a reduced valtrain set: only the center slice (pos 5) per image.

    Returns (X_val, y_val) shaped like get_valtrain_data's output but
    with one slice per image instead of nine.
    """
    prefix = area_id_to_prefix(area_id)
    fn_train = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)
    df_train = pd.read_csv(fn_train)
    X_val = []
    fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
    with tb.open_file(fn_im, 'r') as f:
        for idx, image_id in enumerate(df_train.ImageId.tolist()):
            slice_pos = 5  # center tile of the 3x3 grid
            slice_id = image_id + '_' + str(slice_pos)
            im = np.array(f.get_node('/' + slice_id))
            # Reorder axes (stored H,W,C -> C,H,W for the net).
            im = np.swapaxes(im, 0, 2)
            im = np.swapaxes(im, 1, 2)
            X_val.append(im)
    X_val = np.array(X_val)
    y_val = []
    fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
    with tb.open_file(fn_mask, 'r') as f:
        for idx, image_id in enumerate(df_train.ImageId.tolist()):
            slice_pos = 5
            slice_id = image_id + '_' + str(slice_pos)
            mask = np.array(f.get_node('/' + slice_id))
            mask = (mask > 0.5).astype(np.uint8)
            y_val.append(mask)
    y_val = np.array(y_val)
    y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def get_valtrain_data(area_id):
    """Load all valtrain slices and masks into memory.

    Returns (X_val, y_val): images as a (N, C, H, W) array and binary
    masks reshaped to (N, 1, INPUT_SIZE, INPUT_SIZE).
    """
    prefix = area_id_to_prefix(area_id)
    fn_train = FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix)
    df_train = pd.read_csv(fn_train)
    X_val = []
    fn_im = FMT_VALTRAIN_MUL_STORE.format(prefix)
    with tb.open_file(fn_im, 'r') as f:
        for idx, image_id in enumerate(df_train.ImageId.tolist()):
            for slice_pos in range(9):
                slice_id = image_id + '_' + str(slice_pos)
                im = np.array(f.get_node('/' + slice_id))
                # Reorder axes (stored H,W,C -> C,H,W for the net).
                im = np.swapaxes(im, 0, 2)
                im = np.swapaxes(im, 1, 2)
                X_val.append(im)
    X_val = np.array(X_val)
    y_val = []
    fn_mask = FMT_VALTRAIN_MASK_STORE.format(prefix)
    with tb.open_file(fn_mask, 'r') as f:
        for idx, image_id in enumerate(df_train.ImageId.tolist()):
            for slice_pos in range(9):
                slice_id = image_id + '_' + str(slice_pos)
                mask = np.array(f.get_node('/' + slice_id))
                mask = (mask > 0.5).astype(np.uint8)
                y_val.append(mask)
    y_val = np.array(y_val)
    y_val = y_val.reshape((-1, 1, INPUT_SIZE, INPUT_SIZE))
    return X_val, y_val
def __load_band_cut_th(band_fn, bandsz=3):
    """Read per-area, per-channel clipping thresholds from a CSV.

    Returns {area_id: {channel: {'min': ..., 'max': ...}}} for area ids
    2 through 5 (areas absent from the CSV keep an empty dict).
    """
    df = pd.read_csv(band_fn, index_col='area_id')
    thresholds = {area_id: {} for area_id in range(2, 6)}
    for area_id, row in df.iterrows():
        for chan_i in range(bandsz):
            thresholds[area_id][chan_i] = {
                'min': row['chan{}_min'.format(chan_i)],
                'max': row['chan{}_max'.format(chan_i)],
            }
    return thresholds
def get_slice_3chan_test_im(image_id, band_cut_th):
    """Yield (slice_pos, tile) pairs of normalized RGB test-image tiles.

    Each channel is clipped to its [min, max] threshold and scaled to
    [0, 1]; the 650x650 image is cut into a 3x3 grid of overlapping
    256x256 tiles (stride STRIDE_SZ).
    """
    fn = test_image_id_to_path(image_id)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
        for chan_i in range(3):
            min_val = band_cut_th[chan_i]['min']
            max_val = band_cut_th[chan_i]['max']
            values[chan_i] = np.clip(values[chan_i], min_val, max_val)
            values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
    # Channels-first -> height, width, channels.
    values = np.swapaxes(values, 0, 2)
    values = np.swapaxes(values, 0, 1)
    assert values.shape == (650, 650, 3)
    for slice_pos in range(9):
        pos_j = int(math.floor(slice_pos / 3.0))
        pos_i = int(slice_pos % 3)
        x0 = STRIDE_SZ * pos_i
        y0 = STRIDE_SZ * pos_j
        im = values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
        assert im.shape == (256, 256, 3)
        yield slice_pos, im
def get_slice_3chan_im(image_id, band_cut_th):
    """Yield (slice_pos, tile) pairs of normalized RGB training tiles.

    Identical to get_slice_3chan_test_im but reads the training image
    path for *image_id*.
    """
    fn = train_image_id_to_path(image_id)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
        for chan_i in range(3):
            min_val = band_cut_th[chan_i]['min']
            max_val = band_cut_th[chan_i]['max']
            values[chan_i] = np.clip(values[chan_i], min_val, max_val)
            values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
    # Channels-first -> height, width, channels.
    values = np.swapaxes(values, 0, 2)
    values = np.swapaxes(values, 0, 1)
    assert values.shape == (650, 650, 3)
    for slice_pos in range(9):
        pos_j = int(math.floor(slice_pos / 3.0))
        pos_i = int(slice_pos % 3)
        x0 = STRIDE_SZ * pos_i
        y0 = STRIDE_SZ * pos_j
        im = values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
        assert im.shape == (256, 256, 3)
        yield slice_pos, im
def get_slice_8chan_test_im(image_id, band_cut_th):
    """Yield (slice_pos, tile) pairs of normalized 8-band test tiles.

    Multispectral counterpart of get_slice_3chan_test_im: 8 channels,
    same clip-and-scale normalization and 3x3 overlapping tiling.
    """
    fn = test_image_id_to_mspec_path(image_id)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
        for chan_i in range(8):
            min_val = band_cut_th[chan_i]['min']
            max_val = band_cut_th[chan_i]['max']
            values[chan_i] = np.clip(values[chan_i], min_val, max_val)
            values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
    # Channels-first -> height, width, channels.
    values = np.swapaxes(values, 0, 2)
    values = np.swapaxes(values, 0, 1)
    assert values.shape == (650, 650, 8)
    for slice_pos in range(9):
        pos_j = int(math.floor(slice_pos / 3.0))
        pos_i = int(slice_pos % 3)
        x0 = STRIDE_SZ * pos_i
        y0 = STRIDE_SZ * pos_j
        im = values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
        assert im.shape == (256, 256, 8)
        yield slice_pos, im
def get_slice_8chan_im(image_id, band_cut_th):
    """Yield ``(slice_pos, crop)`` pairs of normalized 8-band TRAIN crops.

    Clips each channel to the thresholds in *band_cut_th*, rescales to
    [0, 1], and yields nine 256x256 crops per 650x650 image.
    """
    fn = train_image_id_to_mspec_path(image_id)
    with rasterio.open(fn, 'r') as f:
        values = f.read().astype(np.float32)
        for chan_i in range(8):
            # Clip to precomputed percentile thresholds, then scale to [0, 1]
            min_val = band_cut_th[chan_i]['min']
            max_val = band_cut_th[chan_i]['max']
            values[chan_i] = np.clip(values[chan_i], min_val, max_val)
            values[chan_i] = (values[chan_i] - min_val) / (max_val - min_val)
    # (chan, H, W) -> (H, W, chan)
    values = np.swapaxes(values, 0, 2)
    values = np.swapaxes(values, 0, 1)
    assert values.shape == (650, 650, 8)
    for slice_pos in range(9):
        # slice_pos enumerates a 3x3 grid of overlapping crops
        pos_j = int(math.floor(slice_pos / 3.0))
        pos_i = int(slice_pos % 3)
        x0 = STRIDE_SZ * pos_i
        y0 = STRIDE_SZ * pos_j
        im = values[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
        assert im.shape == (256, 256, 8)
        yield slice_pos, im
def get_mask_im(df, image_id):
    """Rasterize the building footprints of *image_id* into a 650x650 mask.

    Polygons are parsed as WKT from the ``PolygonWKT_Pix`` column of *df*.
    Exterior rings are filled with 1; interior rings (holes) are reset
    back to 0. Returns a uint8 array (1 = building, 0 = background).
    """
    im_mask = np.zeros((650, 650))
    for idx, row in df[df.ImageId == image_id].iterrows():
        shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
        # Skip degenerate geometries that have no exterior ring
        if shape_obj.exterior is not None:
            coords = list(shape_obj.exterior.coords)
            x = [round(float(pp[0])) for pp in coords]
            y = [round(float(pp[1])) for pp in coords]
            yy, xx = skimage.draw.polygon(y, x, (650, 650))
            im_mask[yy, xx] = 1

            # Punch holes out again after filling the exterior
            interiors = shape_obj.interiors
            for interior in interiors:
                coords = list(interior.coords)
                x = [round(float(pp[0])) for pp in coords]
                y = [round(float(pp[1])) for pp in coords]
                yy, xx = skimage.draw.polygon(y, x, (650, 650))
                im_mask[yy, xx] = 0
    im_mask = (im_mask > 0.5).astype(np.uint8)
    return im_mask
def get_slice_mask_im(df, image_id):
    """Yield ``(slice_pos, mask_crop)`` pairs of 256x256 mask crops.

    Rasterizes the building footprints of *image_id* the same way as
    ``get_mask_im`` and then yields the nine 3x3-grid crops matching the
    image slice layout.
    """
    im_mask = np.zeros((650, 650))
    for idx, row in df[df.ImageId == image_id].iterrows():
        shape_obj = shapely.wkt.loads(row.PolygonWKT_Pix)
        # Skip degenerate geometries that have no exterior ring
        if shape_obj.exterior is not None:
            coords = list(shape_obj.exterior.coords)
            x = [round(float(pp[0])) for pp in coords]
            y = [round(float(pp[1])) for pp in coords]
            yy, xx = skimage.draw.polygon(y, x, (650, 650))
            im_mask[yy, xx] = 1

            # Interior rings are holes: clear them again
            interiors = shape_obj.interiors
            for interior in interiors:
                coords = list(interior.coords)
                x = [round(float(pp[0])) for pp in coords]
                y = [round(float(pp[1])) for pp in coords]
                yy, xx = skimage.draw.polygon(y, x, (650, 650))
                im_mask[yy, xx] = 0
    im_mask = (im_mask > 0.5).astype(np.uint8)

    for slice_pos in range(9):
        # Same 3x3 slicing as the image generators
        pos_j = int(math.floor(slice_pos / 3.0))
        pos_i = int(slice_pos % 3)
        x0 = STRIDE_SZ * pos_i
        y0 = STRIDE_SZ * pos_j
        im_mask_part = im_mask[x0:x0+INPUT_SIZE, y0:y0+INPUT_SIZE]
        assert im_mask_part.shape == (256, 256)
        yield slice_pos, im_mask_part
def prep_valtrain_test_slice_image(area_id):
    """Pre-slice valtrain/valtest images and masks into HDF5 stores.

    For *area_id* this writes, if not already present, six PyTables files:
    8-band MUL slices, 3-band RGB slices, and mask slices for both the
    valtrain and valtest image lists (nine 256x256 crops per image, one
    compressed carray per crop keyed ``<image_id>_<slice_pos>``).
    """
    prefix = area_id_to_prefix(area_id)
    logger.info("prep_valtrain_test_slice_image for {}".format(prefix))
    df_train = pd.read_csv(
        FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_test = pd.read_csv(
        FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_summary = load_train_summary_data(area_id)

    # MUL (8-band) slices
    band_cut_th = __load_band_cut_th(
        FMT_MUL_BANDCUT_TH_PATH, bandsz=8)[area_id]
    fn = FMT_VALTRAIN_MUL_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
                for slice_pos, im in get_slice_8chan_im(image_id, band_cut_th):
                    slice_id = image_id + "_{}".format(slice_pos)
                    atom = tb.Atom.from_dtype(im.dtype)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im.shape,
                                         filters=filters)
                    ds[:] = im

    # Valtest images come from the labeled train set, so the train reader
    # is used here as well.
    fn = FMT_VALTEST_MUL_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
                for slice_pos, im in get_slice_8chan_im(image_id, band_cut_th):
                    slice_id = image_id + "_{}".format(slice_pos)
                    atom = tb.Atom.from_dtype(im.dtype)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im.shape,
                                         filters=filters)
                    ds[:] = im

    # RGB (3-band) slices
    band_cut_th = __load_band_cut_th(FMT_RGB_BANDCUT_TH_PATH)[area_id]
    fn = FMT_VALTRAIN_IM_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
                for slice_pos, im in get_slice_3chan_im(image_id, band_cut_th):
                    slice_id = image_id + "_{}".format(slice_pos)
                    atom = tb.Atom.from_dtype(im.dtype)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im.shape,
                                         filters=filters)
                    ds[:] = im

    fn = FMT_VALTEST_IM_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
                for slice_pos, im in get_slice_3chan_im(image_id, band_cut_th):
                    slice_id = image_id + "_{}".format(slice_pos)
                    atom = tb.Atom.from_dtype(im.dtype)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im.shape,
                                         filters=filters)
                    ds[:] = im

    # Mask slices (labels) for both splits
    fn = FMT_VALTRAIN_MASK_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
                for pos, im_mask in get_slice_mask_im(df_summary, image_id):
                    atom = tb.Atom.from_dtype(im_mask.dtype)
                    slice_id = image_id + "_" + str(pos)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im_mask.shape,
                                         filters=filters)
                    ds[:] = im_mask

    fn = FMT_VALTEST_MASK_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
                for pos, im_mask in get_slice_mask_im(df_summary, image_id):
                    atom = tb.Atom.from_dtype(im_mask.dtype)
                    slice_id = image_id + "_" + str(pos)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im_mask.shape,
                                         filters=filters)
                    ds[:] = im_mask
def prep_train_test_slice_image(area_id):
    """Pre-slice full train/test images and masks into HDF5 stores.

    Same layout as ``prep_valtrain_test_slice_image`` but for the full
    training list and the real (unlabeled) test list; consequently no
    mask store is written for the test split.
    """
    prefix = area_id_to_prefix(area_id)
    logger.info("prep_train_test_slice_images for {}".format(prefix))
    df_train = pd.read_csv(
        FMT_TRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_test = pd.read_csv(
        FMT_TEST_IMAGELIST_PATH.format(prefix=prefix),
        index_col='ImageId')
    df_summary = load_train_summary_data(area_id)

    # MUL (8-band) slices
    band_cut_th = __load_band_cut_th(
        FMT_MUL_BANDCUT_TH_PATH, bandsz=8)[area_id]
    fn = FMT_TRAIN_MUL_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
                for slice_pos, im in get_slice_8chan_im(image_id, band_cut_th):
                    slice_id = image_id + "_{}".format(slice_pos)
                    atom = tb.Atom.from_dtype(im.dtype)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im.shape,
                                         filters=filters)
                    ds[:] = im

    fn = FMT_TEST_MUL_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
                # Real test images live elsewhere -> *_test_* reader
                for slice_pos, im in get_slice_8chan_test_im(
                        image_id,
                        band_cut_th):
                    slice_id = image_id + "_{}".format(slice_pos)
                    atom = tb.Atom.from_dtype(im.dtype)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im.shape,
                                         filters=filters)
                    ds[:] = im

    # RGB (3-band) slices
    band_cut_th = __load_band_cut_th(FMT_RGB_BANDCUT_TH_PATH)[area_id]
    fn = FMT_TRAIN_IM_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
                for slice_pos, im in get_slice_3chan_im(image_id, band_cut_th):
                    slice_id = image_id + "_{}".format(slice_pos)
                    atom = tb.Atom.from_dtype(im.dtype)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im.shape,
                                         filters=filters)
                    ds[:] = im

    fn = FMT_TEST_IM_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_test.index, total=len(df_test)):
                for slice_pos, im in get_slice_3chan_test_im(image_id,
                                                             band_cut_th):
                    slice_id = image_id + "_{}".format(slice_pos)
                    atom = tb.Atom.from_dtype(im.dtype)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im.shape,
                                         filters=filters)
                    ds[:] = im

    # Mask slices exist only for the labeled training split
    fn = FMT_TRAIN_MASK_STORE.format(prefix)
    logger.info("Prepare image container: {}".format(fn))
    if not Path(fn).exists():
        with tb.open_file(fn, 'w') as f:
            for image_id in tqdm.tqdm(df_train.index, total=len(df_train)):
                for pos, im_mask in get_slice_mask_im(df_summary, image_id):
                    atom = tb.Atom.from_dtype(im_mask.dtype)
                    slice_id = image_id + "_" + str(pos)
                    filters = tb.Filters(complib='blosc', complevel=9)
                    ds = f.create_carray(f.root, slice_id, atom, im_mask.shape,
                                         filters=filters)
                    ds[:] = im_mask
def calc_bandvalues_cut_threshold():
    """Compute per-band clip thresholds for every area and persist them.

    Writes one CSV per image type (8-band MUL, 3-band RGB) containing the
    2nd/98th-percentile min/max of each channel, keyed by area prefix and
    area_id. Fix: the unused local ``prefix`` (assigned but never read)
    has been removed.
    """
    rows = []
    for area_id in range(2, 6):
        band_cut_th = __calc_mul_multiband_cut_threshold(area_id)
        row = dict(prefix=area_id_to_prefix(area_id))
        row['area_id'] = area_id
        for chan_i in band_cut_th.keys():
            row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
            row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
        rows.append(row)
    pd.DataFrame(rows).to_csv(FMT_MUL_BANDCUT_TH_PATH, index=False)

    rows = []
    for area_id in range(2, 6):
        band_cut_th = __calc_rgb_multiband_cut_threshold(area_id)
        row = dict(prefix=area_id_to_prefix(area_id))
        row['area_id'] = area_id
        for chan_i in band_cut_th.keys():
            row['chan{}_max'.format(chan_i)] = band_cut_th[chan_i]['max']
            row['chan{}_min'.format(chan_i)] = band_cut_th[chan_i]['min']
        rows.append(row)
    pd.DataFrame(rows).to_csv(FMT_RGB_BANDCUT_TH_PATH, index=False)
def __calc_rgb_multiband_cut_threshold(area_id):
    """Return per-channel ``{'min': ..., 'max': ...}`` clip thresholds for
    the 3-band RGB training images of *area_id*.

    Samples up to 500 images from each of the valtrain and valtest lists
    and takes the 2nd/98th percentile per channel; zero-valued (censored)
    pixels are excluded. Fix: ``scipy.percentile`` (a numpy re-export that
    was deprecated and removed from SciPy) replaced with ``np.percentile``.
    """
    prefix = area_id_to_prefix(area_id)
    band_values = {k: [] for k in range(3)}
    band_cut_th = {k: dict(max=0, min=0) for k in range(3)}

    # Both lists index labeled training imagery, so the same reader is used.
    for list_fmt in [FMT_VALTRAIN_IMAGELIST_PATH, FMT_VALTEST_IMAGELIST_PATH]:
        image_id_list = pd.read_csv(list_fmt.format(
            prefix=prefix)).ImageId.tolist()
        for image_id in tqdm.tqdm(image_id_list[:500]):
            image_fn = train_image_id_to_path(image_id)
            with rasterio.open(image_fn, 'r') as f:
                values = f.read().astype(np.float32)
            for i_chan in range(3):
                values_ = values[i_chan].ravel().tolist()
                values_ = np.array(
                    [v for v in values_ if v != 0]
                )  # Remove censored (zero/no-data) pixels
                band_values[i_chan].append(values_)

    for i_chan in range(3):
        band_values[i_chan] = np.concatenate(
            band_values[i_chan]).ravel()
        band_cut_th[i_chan]['max'] = np.percentile(
            band_values[i_chan], 98)
        band_cut_th[i_chan]['min'] = np.percentile(
            band_values[i_chan], 2)
    return band_cut_th
def __calc_mul_multiband_cut_threshold(area_id):
    """Return per-channel ``{'min': ..., 'max': ...}`` clip thresholds for
    the 8-band multispectral training images of *area_id*.

    Samples up to 500 images from each of the valtrain and valtest lists
    and takes the 2nd/98th percentile per channel; zero-valued (censored)
    pixels are excluded. Fix: ``scipy.percentile`` (a numpy re-export that
    was deprecated and removed from SciPy) replaced with ``np.percentile``.
    """
    prefix = area_id_to_prefix(area_id)
    band_values = {k: [] for k in range(8)}
    band_cut_th = {k: dict(max=0, min=0) for k in range(8)}

    # Both lists index labeled training imagery, so the same reader is used.
    for list_fmt in [FMT_VALTRAIN_IMAGELIST_PATH, FMT_VALTEST_IMAGELIST_PATH]:
        image_id_list = pd.read_csv(list_fmt.format(
            prefix=prefix)).ImageId.tolist()
        for image_id in tqdm.tqdm(image_id_list[:500]):
            image_fn = train_image_id_to_mspec_path(image_id)
            with rasterio.open(image_fn, 'r') as f:
                values = f.read().astype(np.float32)
            for i_chan in range(8):
                values_ = values[i_chan].ravel().tolist()
                values_ = np.array(
                    [v for v in values_ if v != 0]
                )  # Remove censored (zero/no-data) pixels
                band_values[i_chan].append(values_)

    for i_chan in range(8):
        band_values[i_chan] = np.concatenate(
            band_values[i_chan]).ravel()
        band_cut_th[i_chan]['max'] = np.percentile(
            band_values[i_chan], 98)
        band_cut_th[i_chan]['min'] = np.percentile(
            band_values[i_chan], 2)
    return band_cut_th
def train_image_id_to_mspec_path(image_id):
    """Return the file path of the 8-band multispectral TRAIN image for
    *image_id*, derived from its AOI prefix."""
    prefix = image_id_to_prefix(image_id)
    fn = FMT_TRAIN_MSPEC_IMAGE_PATH.format(
        prefix=prefix,
        image_id=image_id)
    return fn
def test_image_id_to_mspec_path(image_id):
    """Return the file path of the 8-band multispectral TEST image for
    *image_id*, derived from its AOI prefix."""
    prefix = image_id_to_prefix(image_id)
    fn = FMT_TEST_MSPEC_IMAGE_PATH.format(
        prefix=prefix,
        image_id=image_id)
    return fn
def train_image_id_to_path(image_id):
    """Return the file path of the RGB TRAIN image for *image_id*."""
    return FMT_TRAIN_RGB_IMAGE_PATH.format(
        prefix=image_id_to_prefix(image_id),
        image_id=image_id)
def test_image_id_to_path(image_id):
    """Return the file path of the RGB TEST image for *image_id*."""
    return FMT_TEST_RGB_IMAGE_PATH.format(
        prefix=image_id_to_prefix(image_id),
        image_id=image_id)
def image_id_to_prefix(image_id):
    """Return the AOI prefix encoded in *image_id*.

    Image ids have the form ``<prefix>_img<number>``; everything before
    the first ``img`` token, minus the joining underscore, is the prefix.
    """
    head, _, _ = image_id.partition('img')
    return head[:-1]
def load_train_summary_data(area_id):
    """Load the training summary CSV (building footprints) for *area_id*."""
    prefix = area_id_to_prefix(area_id)
    summary_path = FMT_TRAIN_SUMMARY_PATH.format(prefix=prefix)
    return pd.read_csv(summary_path)
def split_val_train_test(area_id):
    """Shuffle the image ids of *area_id* and split them 70/30 into the
    valtrain/valtest image-list CSVs.

    Fix: the unused local ``sz_valtest`` has been removed.
    NOTE(review): relies on the global numpy RNG state; seed it beforehand
    for reproducible splits.
    """
    prefix = area_id_to_prefix(area_id)
    df = load_train_summary_data(area_id)
    df_agg = df.groupby('ImageId').agg('first')

    image_id_list = df_agg.index.tolist()
    np.random.shuffle(image_id_list)
    sz_valtrain = int(len(image_id_list) * 0.7)

    # Ensure the output directory exists
    parent_dir = Path(FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix)).parent
    if not parent_dir.exists():
        parent_dir.mkdir(parents=True)

    pd.DataFrame({'ImageId': image_id_list[:sz_valtrain]}).to_csv(
        FMT_VALTRAIN_IMAGELIST_PATH.format(prefix=prefix),
        index=False)
    pd.DataFrame({'ImageId': image_id_list[sz_valtrain:]}).to_csv(
        FMT_VALTEST_IMAGELIST_PATH.format(prefix=prefix),
        index=False)
def get_image_mask_from_dataframe(df, image_id):
    """Rasterize the building footprints of *image_id* into a 650x650
    uint8 mask (1 = building, 0 = background).

    This was a line-for-line duplicate of ``get_mask_im``; delegate to it
    so the rasterization logic lives in exactly one place.
    """
    return get_mask_im(df, image_id)
@click.group()
def cli():
    # Click entry point grouping the pipeline sub-commands defined below.
    pass
@cli.command()
def testmerge():
    # Merge the per-area polygon CSVs into the overall validation files and
    # the final solution CSV. The first line of each CSV is a header; only
    # the header of the first area (area_id == 2) is kept in the output.

    # File check: every per-area test polygon file must exist.
    for area_id in range(2, 6):
        prefix = area_id_to_prefix(area_id)
        fn_out = FMT_TESTPOLY_PATH.format(prefix)
        if not Path(fn_out).exists():
            logger.info("Required file not found: {}".format(fn_out))
            sys.exit(1)

    # File check: every per-area validation polygon file must exist.
    for area_id in range(2, 6):
        prefix = area_id_to_prefix(area_id)
        fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
        if not Path(fn_out).exists():
            logger.info("Required file not found: {}".format(fn_out))
            sys.exit(1)

    # Merge validation polygon files.
    rows = []
    for area_id in range(2, 6):
        prefix = area_id_to_prefix(area_id)
        fn_out = FMT_VALTESTPOLY_PATH.format(prefix)
        with open(fn_out, 'r') as f:
            line = f.readline()
            if area_id == 2:
                rows.append(line)
            for line in f:
                # remove interiors
                line = _remove_interiors(line)
                rows.append(line)
    fn_out = FMT_VALTESTPOLY_OVALL_PATH
    with open(fn_out, 'w') as f:
        for line in rows:
            f.write(line)

    # Merge validation truth files (interiors are kept here).
    rows = []
    for area_id in range(2, 6):
        prefix = area_id_to_prefix(area_id)
        fn_out = FMT_VALTESTTRUTH_PATH.format(prefix)
        with open(fn_out, 'r') as f:
            line = f.readline()
            if area_id == 2:
                rows.append(line)
            for line in f:
                rows.append(line)
    fn_out = FMT_VALTESTTRUTH_OVALL_PATH
    with open(fn_out, 'w') as f:
        for line in rows:
            f.write(line)

    # Merge test polygon files into the final submission CSV.
    rows = []
    for area_id in range(2, 6):
        prefix = area_id_to_prefix(area_id)
        fn_out = FMT_TESTPOLY_PATH.format(prefix)
        with open(fn_out, 'r') as f:
            line = f.readline()
            if area_id == 2:
                rows.append(line)
            for line in f:
                # remove interiors
                line = _remove_interiors(line)
                rows.append(line)
    with open(FN_SOLUTION_CSV, 'w') as f:
        for line in rows:
            f.write(line)
@cli.command()
@click.argument('area_id', type=int)
def testproc(area_id):
    # Run inference on the test set of a single area.
    prefix = area_id_to_prefix(area_id)
    logger.info(">>>> Test proc for {}".format(prefix))
    _internal_test(area_id)
    logger.info(">>>> Test proc for {} ... done".format(prefix))
@cli.command()
@click.argument('area_id', type=int)
@click.option('--epoch', type=int, default=0)
@click.option('--th', type=int, default=MIN_POLYGON_AREA)
@click.option('--predict/--no-predict', default=False)
def validate_city_fscore(area_id, epoch, th, predict):
    # Compute and log the f-score of one area for the given epoch and
    # minimum-polygon-area threshold; optionally re-run prediction first.
    _internal_validate_fscore(
        area_id,
        epoch=epoch,
        enable_tqdm=True,
        min_th=th,
        predict=predict)
    evaluate_record = _calc_fscore_per_aoi(area_id)
    evaluate_record['epoch'] = epoch
    evaluate_record['min_area_th'] = th
    evaluate_record['area_id'] = area_id
    logger.info("\n" + json.dumps(evaluate_record, indent=4))
@cli.command()
@click.argument('datapath', type=str)
def evalfscore(datapath):
    # Evaluate the f-score of every training epoch on the validation set,
    # pick the best epoch, then sweep the minimum-polygon-area threshold.
    area_id = directory_name_to_area_id(datapath)
    prefix = area_id_to_prefix(area_id)
    logger.info("Evaluate fscore on validation set: {}".format(prefix))

    # for each epoch
    # if not Path(FMT_VALMODEL_EVALHIST.format(prefix)).exists():
    if True:
        df_hist = pd.read_csv(FMT_VALMODEL_HIST.format(prefix))
        df_hist.loc[:, 'epoch'] = list(range(1, len(df_hist) + 1))

        rows = []
        for zero_base_epoch in range(0, len(df_hist)):
            logger.info(">>> Epoch: {}".format(zero_base_epoch))
            _internal_validate_fscore_wo_pred_file(
                area_id,
                epoch=zero_base_epoch,
                enable_tqdm=True,
                min_th=MIN_POLYGON_AREA)
            evaluate_record = _calc_fscore_per_aoi(area_id)
            evaluate_record['zero_base_epoch'] = zero_base_epoch
            evaluate_record['min_area_th'] = MIN_POLYGON_AREA
            evaluate_record['area_id'] = area_id
            logger.info("\n" + json.dumps(evaluate_record, indent=4))
            rows.append(evaluate_record)
        pd.DataFrame(rows).to_csv(
            FMT_VALMODEL_EVALHIST.format(prefix),
            index=False)

    # find best min-poly-threshold
    df_evalhist = pd.read_csv(FMT_VALMODEL_EVALHIST.format(prefix))
    best_row = df_evalhist.sort_values(by='fscore', ascending=False).iloc[0]
    best_epoch = int(best_row.zero_base_epoch)
    best_fscore = best_row.fscore  # NOTE(review): currently unused

    # optimize min area th
    rows = []
    for th in [30, 60, 90, 120, 150, 180, 210, 240]:
        logger.info(">>> TH: {}".format(th))
        predict_flag = False
        if th == 30:
            # Predictions are generated once (first threshold) and reused
            # for the remaining threshold values.
            predict_flag = True

        _internal_validate_fscore(
            area_id,
            epoch=best_epoch,
            enable_tqdm=True,
            min_th=th,
            predict=predict_flag)
        evaluate_record = _calc_fscore_per_aoi(area_id)
        evaluate_record['zero_base_epoch'] = best_epoch
        evaluate_record['min_area_th'] = th
        evaluate_record['area_id'] = area_id
        logger.info("\n" + json.dumps(evaluate_record, indent=4))
        rows.append(evaluate_record)

    pd.DataFrame(rows).to_csv(
        FMT_VALMODEL_EVALTHHIST.format(prefix),
        index=False)

    logger.info("Evaluate fscore on validation set: {} .. done".format(prefix))
@cli.command()
@click.argument('datapath', type=str)
def validate(datapath):
    # Train the U-Net on the valtrain split, checkpointing every epoch so
    # that `evalfscore` can later pick the best one.
    # Fix: removed a duplicated `prefix = area_id_to_prefix(area_id)`
    # statement that recomputed the same value twice.
    area_id = directory_name_to_area_id(datapath)
    prefix = area_id_to_prefix(area_id)
    logger.info(">> validate sub-command: {}".format(prefix))

    logger.info("Loading valtest and mulmean ...")
    X_mean = get_mul_mean_image(area_id)
    X_val, y_val = get_valtest_data(area_id)
    X_val = X_val - X_mean

    if not Path(MODEL_DIR).exists():
        Path(MODEL_DIR).mkdir(parents=True)

    logger.info("Instantiate U-Net model")
    model = get_unet()
    model_checkpoint = ModelCheckpoint(
        FMT_VALMODEL_PATH.format(prefix + "_{epoch:02d}"),
        monitor='val_jaccard_coef_int',
        save_best_only=False)
    model_earlystop = EarlyStopping(
        monitor='val_jaccard_coef_int',
        patience=10,
        verbose=0,
        mode='max')
    model_history = History()

    df_train = pd.read_csv(FMT_VALTRAIN_IMAGELIST_PATH.format(
        prefix=prefix))
    logger.info("Fit")
    model.fit_generator(
        generate_valtrain_batch(area_id, batch_size=2, immean=X_mean),
        samples_per_epoch=len(df_train) * 9,  # nine slices per image
        nb_epoch=35,
        verbose=1,
        validation_data=(X_val, y_val),
        callbacks=[model_checkpoint, model_earlystop, model_history])
    model.save_weights(FMT_VALMODEL_LAST_PATH.format(prefix))

    # Save evaluation history
    pd.DataFrame(model_history.history).to_csv(
        FMT_VALMODEL_HIST.format(prefix), index=False)
    logger.info(">> validate sub-command: {} ... Done".format(prefix))
# Dispatch to the click command group when run as a script.
if __name__ == '__main__':
    cli()
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, unicode_literals
import json
from mopidy.models import immutable
class ModelJSONEncoder(json.JSONEncoder):
    """
    Automatically serialize Mopidy models to JSON.

    Usage::

        >>> import json
        >>> json.dumps({'a_track': Track(name='name')}, cls=ModelJSONEncoder)
        '{"a_track": {"__model__": "Track", "name": "name"}}'

    """

    def default(self, obj):
        # Mopidy models know how to serialize themselves; everything else
        # is deferred to the base encoder (which raises TypeError).
        if not isinstance(obj, immutable.ImmutableObject):
            return json.JSONEncoder.default(self, obj)
        return obj.serialize()
def model_json_decoder(dct):
    """
    Automatically deserialize Mopidy models from JSON.

    Usage::

        >>> import json
        >>> json.loads(
        ...     '{"a_track": {"__model__": "Track", "name": "name"}}',
        ...     object_hook=model_json_decoder)
        {u'a_track': Track(artists=[], name=u'name')}

    """
    if '__model__' not in dct:
        return dct
    model_name = dct.pop('__model__')
    model_cls = immutable._models.get(model_name)
    if model_cls is not None:
        return model_cls(**dct)
    return dct
|
nilq/baby-python
|
python
|
"""Generate a plot to visualize revision impact inequality based on data-flow
interactions."""
import typing as tp
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib import axes, style
from varats.data.databases.blame_interaction_database import (
BlameInteractionDatabase,
)
from varats.data.metrics import gini_coefficient, lorenz_curve
from varats.mapping.commit_map import CommitMap, get_commit_map
from varats.paper.case_study import CaseStudy
from varats.plot.plot import Plot, PlotDataEmpty
from varats.plot.plots import PlotGenerator
from varats.plots.repository_churn import (
build_repo_churn_table,
draw_code_churn,
)
from varats.project.project_util import get_local_project_git
from varats.ts_utils.click_param_types import REQUIRE_MULTI_CASE_STUDY
from varats.utils.git_util import (
ChurnConfig,
calc_repo_code_churn,
ShortCommitHash,
FullCommitHash,
)
def draw_interaction_lorenz_curve(
    axis: axes.SubplotBase, data: pd.DataFrame, unique_rev_strs: tp.List[str],
    consider_in_interactions: bool, consider_out_interactions: bool,
    line_width: float
) -> None:
    """
    Draws a lorenz_curve onto the given axis.

    Args:
        axis: matplot axis to draw on
        data: plotting data
        unique_rev_strs: x-axis labels, one per revision in ``data``
        consider_in_interactions: include incoming interactions
        consider_out_interactions: include outgoing interactions
        line_width: width of the plotted curve

    Raises:
        AssertionError: if neither interaction direction is selected
    """
    if consider_in_interactions and consider_out_interactions:
        data_selector = 'HEAD_Interactions'
    elif consider_in_interactions:
        data_selector = 'IN_HEAD_Interactions'
    elif consider_out_interactions:
        data_selector = 'OUT_HEAD_Interactions'
    else:
        raise AssertionError(
            "At least one of the in/out interaction needs to be selected"
        )

    # NOTE(review): sorts the caller's data frame in place — side effect
    # visible to subsequent uses of ``data``.
    data.sort_values(by=[data_selector, 'time_id'], inplace=True)
    lor = lorenz_curve(data[data_selector])
    axis.plot(unique_rev_strs, lor, color='#cc0099', linewidth=line_width)
def draw_perfect_lorenz_curve(
    axis: axes.SubplotBase, unique_rev_strs: tp.List[str], line_width: float
) -> None:
    """
    Draws a perfect lorenz curve onto the given axis, i.e., a straight line
    from the point of origin to the right upper corner.

    Args:
        axis: axis to draw to
        unique_rev_strs: x-axis labels, one per revision
        line_width: width of the drawn line
    """
    # A perfectly equal distribution is linear from 0 to 1.
    equal_distribution = np.linspace(0.0, 1.0, len(unique_rev_strs))
    axis.plot(
        unique_rev_strs,
        equal_distribution,
        color='black',
        linestyle='--',
        linewidth=line_width
    )
def draw_interaction_code_churn(
    axis: axes.SubplotBase, data: pd.DataFrame, project_name: str,
    commit_map: CommitMap
) -> None:
    """
    Helper function to draw parts of the code churn that are related to our
    data.

    Args:
        axis: to draw on
        data: plotting data
        project_name: name of the project
        commit_map: CommitMap for the given project(by project_name)
    """
    unique_revs = data['revision'].unique()

    def remove_revisions_without_data(revision: ShortCommitHash) -> bool:
        """Removes all churn data where this plot has no data."""
        return revision.hash in unique_revs

    def apply_sorting(churn_data: pd.DataFrame) -> pd.DataFrame:
        # Reorder churn rows so they line up with the plot's data frame.
        churn_data.set_index('time_id', inplace=True)
        churn_data = churn_data.reindex(index=data['time_id'])
        return churn_data.reset_index()

    # Both callbacks are applied by draw_code_churn to filter/sort the
    # repository churn table before drawing.
    draw_code_churn(
        axis, project_name, commit_map, remove_revisions_without_data,
        apply_sorting
    )
def filter_non_code_changes(
    blame_data: pd.DataFrame, project_name: str
) -> pd.DataFrame:
    """
    Filter all revision from data frame that are not code change related.

    Args:
        blame_data: data to filter
        project_name: name of the project

    Returns:
        filtered data frame without rows related to non code changes
    """
    repo = get_local_project_git(project_name)
    # Use a set for O(1) membership tests: the previous list made the
    # per-row lookup below scan every commit hash.
    code_related_changes = {
        x.hash for x in calc_repo_code_churn(
            repo, ChurnConfig.create_c_style_languages_config()
        )
    }
    return blame_data[blame_data.apply(
        lambda x: x['revision'] in code_related_changes, axis=1
    )]
class BlameLorenzCurve(Plot, plot_name="b_lorenz_curve"):
    """Plots the lorenz curve for IN/OUT interactions for a given project."""

    NAME = 'b_lorenz_curve'

    def plot(self, view_mode: bool) -> None:
        """Render a 2x2 figure: lorenz curves for incoming (left) and
        outgoing (right) interactions on top, code churn below each."""
        style.use(self.plot_config.style())
        case_study: CaseStudy = self.plot_kwargs['case_study']
        project_name: str = case_study.project_name
        commit_map = get_commit_map(project_name)

        fig = plt.figure()
        fig.subplots_adjust(top=0.95, hspace=0.05, right=0.95, left=0.07)
        grid_spec = fig.add_gridspec(3, 2)

        main_axis = fig.add_subplot(grid_spec[:-1, :1])
        main_axis.set_title("Lorenz curve for incoming commit interactions")
        main_axis.get_xaxis().set_visible(False)

        main_axis_r = fig.add_subplot(grid_spec[:-1, -1])
        main_axis_r.set_title("Lorenz curve for outgoing commit interactions")
        main_axis_r.get_xaxis().set_visible(False)

        # Churn sub-plots share the x axis with the curve above them.
        churn_axis = fig.add_subplot(grid_spec[2, :1], sharex=main_axis)
        churn_axis_r = fig.add_subplot(grid_spec[2, -1], sharex=main_axis_r)

        data = BlameInteractionDatabase.get_data_for_project(
            project_name, [
                "revision", "time_id", "IN_HEAD_Interactions",
                "OUT_HEAD_Interactions", "HEAD_Interactions"
            ], commit_map, case_study
        )
        data = filter_non_code_changes(data, project_name)
        if data.empty:
            raise PlotDataEmpty
        unique_rev_strs: tp.List[str] = [rev.hash for rev in data['revision']]

        # Draw left side of the plot
        draw_interaction_lorenz_curve(
            main_axis, data, unique_rev_strs, True, False,
            self.plot_config.line_width()
        )
        draw_perfect_lorenz_curve(
            main_axis, unique_rev_strs, self.plot_config.line_width()
        )
        draw_interaction_code_churn(churn_axis, data, project_name, commit_map)

        # Draw right side of the plot
        draw_interaction_lorenz_curve(
            main_axis_r, data, unique_rev_strs, False, True,
            self.plot_config.line_width()
        )
        draw_perfect_lorenz_curve(
            main_axis_r, unique_rev_strs, self.plot_config.line_width()
        )
        draw_interaction_code_churn(
            churn_axis_r, data, project_name, commit_map
        )

        # Adapt axis to draw nicer plots
        for x_label in churn_axis.get_xticklabels():
            x_label.set_fontsize(self.plot_config.x_tick_size())
            x_label.set_rotation(270)
            x_label.set_fontfamily('monospace')

        for x_label in churn_axis_r.get_xticklabels():
            x_label.set_fontsize(self.plot_config.x_tick_size())
            x_label.set_rotation(270)
            x_label.set_fontfamily('monospace')

    def calc_missing_revisions(
        self, boundary_gradient: float
    ) -> tp.Set[FullCommitHash]:
        raise NotImplementedError
class BlameLorenzCurveGenerator(
    PlotGenerator,
    generator_name="lorenz-curve-plot",
    options=[REQUIRE_MULTI_CASE_STUDY]
):
    """Generates lorenz-curve plot(s) for the selected case study(ies)."""

    def generate(self) -> tp.List[Plot]:
        """Instantiate one BlameLorenzCurve per selected case study."""
        case_studies: tp.List[CaseStudy] = self.plot_kwargs.pop("case_study")
        plots: tp.List[Plot] = []
        for case_study in case_studies:
            plots.append(
                BlameLorenzCurve(
                    self.plot_config,
                    case_study=case_study,
                    **self.plot_kwargs
                )
            )
        return plots
def draw_gini_churn_over_time(
    axis: axes.SubplotBase, blame_data: pd.DataFrame,
    unique_rev_strs: tp.List[str], project_name: str, commit_map: CommitMap,
    consider_insertions: bool, consider_deletions: bool, line_width: float
) -> None:
    """
    Draws the gini of the churn distribution over time.

    Args:
        axis: axis to draw to
        blame_data: blame data of the base plot
        unique_rev_strs: x-axis labels, one per revision in ``blame_data``
        project_name: name of the project
        commit_map: CommitMap for the given project(by project_name)
        consider_insertions: True, insertions should be included
        consider_deletions: True, deletions should be included
        line_width: line width of the plot lines
    """
    churn_data = build_repo_churn_table(project_name, commit_map)

    # clean data
    unique_revs = blame_data['revision'].unique()

    def remove_revisions_without_data(revision: ShortCommitHash) -> bool:
        """Removes all churn data where this plot has no data."""
        # NOTE(review): compares a 10-char hash prefix against the plot's
        # revision strings — confirm both sides use the same abbreviation.
        return revision.hash[:10] in unique_revs

    churn_data = churn_data[churn_data.apply(
        lambda x: remove_revisions_without_data(x['revision']), axis=1
    )]

    # reorder churn data to match blame_data
    churn_data.set_index('time_id', inplace=True)
    churn_data = churn_data.reindex(index=blame_data['time_id'])
    churn_data = churn_data.reset_index()

    # Cumulative gini per time step.
    # NOTE(review): re-filters churn_data for every time_id (quadratic);
    # acceptable for plot-sized data, but worth caching if it gets hot.
    gini_churn = []
    for time_id in blame_data['time_id']:
        if consider_insertions and consider_deletions:
            distribution = (
                churn_data[churn_data.time_id <= time_id].insertions +
                churn_data[churn_data.time_id <= time_id].deletions
            ).sort_values(ascending=True)
        elif consider_insertions:
            distribution = churn_data[churn_data.time_id <= time_id
                                     ].insertions.sort_values(ascending=True)
        elif consider_deletions:
            distribution = churn_data[churn_data.time_id <= time_id
                                     ].deletions.sort_values(ascending=True)
        else:
            raise AssertionError(
                "At least one of the in/out interaction needs to be selected"
            )
        gini_churn.append(gini_coefficient(distribution))

    # Line style encodes which churn components are included.
    if consider_insertions and consider_deletions:
        linestyle = '-'
        label = 'Insertions + Deletions'
    elif consider_insertions:
        linestyle = '--'
        label = 'Insertions'
    else:
        linestyle = ':'
        label = 'Deletions'

    axis.plot(
        unique_rev_strs,
        gini_churn,
        linestyle=linestyle,
        linewidth=line_width,
        label=label,
        color='orange'
    )
def draw_gini_blame_over_time(
    axis: axes.SubplotBase, blame_data: pd.DataFrame,
    unique_rev_strs: tp.List[str], consider_in_interactions: bool,
    consider_out_interactions: bool, line_width: float
) -> None:
    """
    Draws the gini coefficients of the blame interactions over time.

    Args:
        axis: axis to draw to
        blame_data: blame data of the base plot
        unique_rev_strs: x-axis labels, one per revision in ``blame_data``
        consider_in_interactions: True, IN interactions should be included
        consider_out_interactions: True, OUT interactions should be included
        line_width: line width of the plot lines

    Raises:
        AssertionError: if neither interaction direction is selected
    """
    # Line style encodes which interaction direction is plotted.
    if consider_in_interactions and consider_out_interactions:
        data_selector = 'HEAD_Interactions'
        linestyle = '-'
        label = "Interactions"
    elif consider_in_interactions:
        data_selector = 'IN_HEAD_Interactions'
        linestyle = '--'
        label = "IN Interactions"
    elif consider_out_interactions:
        data_selector = 'OUT_HEAD_Interactions'
        linestyle = ':'
        label = "OUT Interactions"
    else:
        raise AssertionError(
            "At least one of the in/out interaction needs to be selected"
        )

    # Cumulative gini per time step: gini over all rows up to time_id.
    gini_coefficients = []
    for time_id in blame_data.time_id:
        distribution = blame_data[blame_data.time_id <= time_id
                                 ][data_selector].sort_values(ascending=True)
        gini_coefficients.append(gini_coefficient(distribution))

    axis.plot(
        unique_rev_strs,
        gini_coefficients,
        linestyle=linestyle,
        linewidth=line_width,
        label=label,
        color='#cc0099'
    )
class BlameGiniOverTime(Plot, plot_name="b_gini_overtime"):
    """
    Plots the gini coefficient over time for a project.

    This shows how the distribution of the interactions/churn changes of time.
    """

    NAME = 'b_gini_overtime'

    def plot(self, view_mode: bool) -> None:
        """Render the gini-over-time figure for the configured case study."""
        style.use(self.plot_config.style())
        case_study: CaseStudy = self.plot_kwargs["case_study"]
        project_name = case_study.project_name
        commit_map: CommitMap = get_commit_map(project_name)
        data = BlameInteractionDatabase.get_data_for_project(
            project_name, [
                "revision", "time_id", "IN_HEAD_Interactions",
                "OUT_HEAD_Interactions", "HEAD_Interactions"
            ], commit_map, case_study
        )
        data = filter_non_code_changes(data, project_name)
        if data.empty:
            raise PlotDataEmpty
        data.sort_values(by=['time_id'], inplace=True)
        # Two stacked axes: gini curves on top, code churn below.
        fig = plt.figure()
        fig.subplots_adjust(top=0.95, hspace=0.05, right=0.95, left=0.07)
        grid_spec = fig.add_gridspec(3, 1)
        main_axis = fig.add_subplot(grid_spec[:-1, :])
        main_axis.set_title("Gini coefficient over the project lifetime")
        main_axis.get_xaxis().set_visible(False)
        churn_axis = fig.add_subplot(grid_spec[2, :], sharex=main_axis)
        revision_labels: tp.List[str] = [rev.hash for rev in data['revision']]
        width = self.plot_config.line_width()
        # Blame-interaction gini curves: both, IN-only, OUT-only.
        for use_in, use_out in ((True, True), (True, False), (False, True)):
            draw_gini_blame_over_time(
                main_axis, data, revision_labels, use_in, use_out, width
            )
        # Churn gini curves: insertions+deletions, insertions, deletions.
        for use_insertions, use_deletions in ((True, True), (True, False),
                                              (False, True)):
            draw_gini_churn_over_time(
                main_axis, data, revision_labels, project_name, commit_map,
                use_insertions, use_deletions, width
            )
        main_axis.legend()
        main_axis.set_ylim((0., 1.))
        draw_interaction_code_churn(churn_axis, data, project_name, commit_map)
        # Adapt axis to draw nicer plots
        for x_label in churn_axis.get_xticklabels():
            x_label.set_fontsize(self.plot_config.x_tick_size())
            x_label.set_rotation(270)
            x_label.set_fontfamily('monospace')

    def calc_missing_revisions(
        self, boundary_gradient: float
    ) -> tp.Set[FullCommitHash]:
        """Not supported for this plot type."""
        raise NotImplementedError
class BlameGiniOverTimeGenerator(
    PlotGenerator,
    generator_name="gini-overtime-plot",
    options=[REQUIRE_MULTI_CASE_STUDY]
):
    """Generates gini-overtime plot(s) for the selected case study(ies)."""

    def generate(self) -> tp.List[Plot]:
        """Build one BlameGiniOverTime plot per selected case study."""
        selected_studies: tp.List[CaseStudy] = self.plot_kwargs.pop(
            "case_study"
        )
        plots: tp.List[Plot] = []
        for current_study in selected_studies:
            plots.append(
                BlameGiniOverTime(
                    self.plot_config, case_study=current_study,
                    **self.plot_kwargs
                )
            )
        return plots
|
nilq/baby-python
|
python
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow import conf
from airflow.upgrade.rules.base_rule import BaseRule
from airflow.utils.module_loading import import_string
# (new import path, old import path) pairs for task handlers that moved from
# airflow.utils.log into their respective provider packages.
LOGS = [
    (
        "airflow.providers.amazon.aws.log.s3_task_handler.S3TaskHandler",
        "airflow.utils.log.s3_task_handler.S3TaskHandler",
    ),
    (
        "airflow.providers.amazon.aws.log.cloudwatch_task_handler.CloudwatchTaskHandler",
        "airflow.utils.log.cloudwatch_task_handler.CloudwatchTaskHandler",
    ),
    (
        "airflow.providers.elasticsearch.log.es_task_handler.ElasticsearchTaskHandler",
        "airflow.utils.log.es_task_handler.ElasticsearchTaskHandler",
    ),
    (
        "airflow.providers.google.cloud.log.stackdriver_task_handler.StackdriverTaskHandler",
        "airflow.utils.log.stackdriver_task_handler.StackdriverTaskHandler",
    ),
    (
        "airflow.providers.google.cloud.log.gcs_task_handler.GCSTaskHandler",
        "airflow.utils.log.gcs_task_handler.GCSTaskHandler",
    ),
    (
        "airflow.providers.microsoft.azure.log.wasb_task_handler.WasbTaskHandler",
        "airflow.utils.log.wasb_task_handler.WasbTaskHandler",
    ),
]
class TaskHandlersMovedRule(BaseRule):
    """Upgrade rule flagging logging configs that still import the pre-move
    remote task handler classes."""

    title = "Changes in import path of remote task handlers"
    description = (
        "The remote log task handlers have been moved to the providers "
        "directory and into their respective providers packages."
    )

    def check(self):
        """Return a message list if the configured task handler uses an old
        path, otherwise None."""
        logging_class = conf.get("core", "logging_config_class", fallback=None)
        if not logging_class:
            # No custom logging config configured; nothing to check.
            return None
        config = import_string(logging_class)
        configured_path = config['handlers']['task']['class']
        for new_path, old_path in LOGS:
            if configured_path != old_path:
                continue
            return [
                "This path : `{old}` should be updated to this path: `{new}`".format(old=old_path,
                                                                                     new=new_path)
            ]
        return None
|
nilq/baby-python
|
python
|
from InsertionSort import insertionSort
import math
def bucketSort(customList):
    """Sort a list of non-negative numbers in place with bucket sort.

    Elements are scattered into ~sqrt(n) buckets by magnitude, each bucket is
    sorted, and the buckets are concatenated back into customList.

    Args:
        customList: list of non-negative numbers; mutated in place.

    Returns:
        The same list object, sorted ascending.
    """
    # Edge case: nothing to sort (max() below would raise on an empty list).
    if not customList:
        print(customList)
        return customList
    numBuckets = round(math.sqrt(len(customList)))
    maxValue = max(customList)
    # Edge case: all elements are zero -> bucket index math would divide by 0,
    # but the list is already sorted.
    if maxValue == 0:
        print(customList)
        return customList
    buckets = [[] for _ in range(numBuckets)]
    # Scatter elements into buckets.
    # BUG FIX: was `for j in range(customList)` (TypeError: range() needs an
    # int) — we iterate the values themselves.
    for value in customList:
        index_b = math.ceil(value * numBuckets / maxValue)
        # BUG FIX: value 0 produced index -1 and landed in the LAST bucket,
        # breaking the sort; clamp so 0 goes into the first bucket.
        buckets[max(index_b, 1) - 1].append(value)
    # Sort each bucket (stdlib sorted replaces the project insertionSort;
    # both produce an ascending list).
    for i in range(numBuckets):
        buckets[i] = sorted(buckets[i])
    # Gather: ceil() is monotonic, so concatenating buckets in order yields
    # a fully sorted list.
    k = 0
    for bucket in buckets:
        for value in bucket:
            customList[k] = value
            k += 1
    print(customList)
    return customList


if __name__ == "__main__":
    # Demo run; guarded so importing this module has no side effects.
    bucketSort([11, 98, 23, 78, 0, 22, 14, 7, 61, 43, 86, 65])
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import re
import requests
from datetime import datetime, timedelta
from jobs import AbstractJob
class Vaernesekspressen(AbstractJob):
    """Job that fetches upcoming airport coach departures from the
    Vaernesekspressen ticketing API."""

    def __init__(self, conf):
        # Vaernes airport is the only destination the service supports.
        self.airport_id = 113
        self.from_stop = conf["from_stop"]
        self.interval = conf["interval"]
        self.timeout = conf.get("timeout")
        self.base_url = conf.get("base_url", "https://www.vaernesekspressen.no")
        # Kept as an attribute so tests can inject a fixed clock.
        self.now = datetime.now

    def _find_stop_id(self):
        """Resolve the configured stop name to the API's numeric stop ID."""
        url = "{}/Umbraco/Api/TicketOrderApi/GetStops".format(self.base_url)
        # Route 31 is the only route offered by the service.
        r = requests.get(url, params={"routeId": 31}, timeout=self.timeout)
        r.raise_for_status()
        wanted = self.from_stop.lower()
        for stop in r.json():
            if stop["Name"].lower() == wanted:
                return stop["Id"]
        raise ValueError('Could not find ID for stop "{}"'.format(self.from_stop))

    def _timestamp(self, dt, tz):
        """Convert a naive local datetime plus CET/CEST marker to a Unix
        timestamp, without pulling in a timezone library."""
        offsets = {"CET": timedelta(hours=1), "CEST": timedelta(hours=2)}
        if tz not in offsets:
            raise ValueError('Unexpected time zone "{}"'.format(tz))
        epoch = datetime(1970, 1, 1)
        return (dt - offsets[tz] - epoch).total_seconds()

    def _parse_time(self, date):
        """Parse a server time string such as '2020-01-01 12:30:00.0 CET'."""
        stamp, tz = date.rsplit(" ", 1)
        parsed = datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S.0")
        return int(self._timestamp(parsed, tz))

    def _departures(self, stop_id, dt):
        """Query departures from stop_id towards the airport on day dt."""
        url = "{}/Umbraco/Api/TicketOrderApi/GetJourneys".format(self.base_url)
        payload = {
            "From": str(stop_id),
            "To": str(self.airport_id),
            "Route": "31",
            "Date": dt.strftime("%d.%m.%Y"),
            "Adult": "1",
            "Student": "0",
            "Child": "0",
            "Baby": "0",
            "Senior": "0",
            "isRoundTrip": False,
        }
        r = requests.post(url, json=payload, timeout=self.timeout)
        r.raise_for_status()
        departures = []
        for journey in r.json():
            departures.append({
                "stop_name": self._trim_name(journey["Start"]["Name"]),
                "destination_name": self._trim_name(journey["End"]["Name"]),
                "departure_time": str(self._parse_time(journey["DepartureTime"])),
            })
        return departures

    def _trim_name(self, name):
        """Strip the leading route prefix (e.g. 'FB 73 ') from a stop name."""
        return re.sub(r"^FB \d+ ", "", name)

    def get(self):
        """Return the next departures as {'from', 'to', 'departures'}."""
        stop_id = self._find_stop_id()
        now = self.now()
        departures = self._departures(stop_id, now)
        if len(departures) < 2:
            # Few departures today, include tomorrow's departures
            tomorrow = (now + timedelta(days=1)).date()
            departures += self._departures(stop_id, tomorrow)
        from_ = "N/A"
        to = "N/A"
        if departures:
            from_ = departures[0]["stop_name"]
            to = departures[0]["destination_name"]
        return {"from": from_, "to": to, "departures": departures}
|
nilq/baby-python
|
python
|
import jax.numpy as jnp
from jax import vmap, grad, nn, tree_util, jit, ops, custom_vjp
from functools import partial
from jax.experimental import ode
from collections import namedtuple
# Augmented state carried by the forward ODE solve in _gradient_flow_fwd:
# B has shape (M, M, N, N), s has shape (M, N) and z has shape (M, M, M, N),
# as initialised there (M = number of inputs, N = parameter dimension).
GradientFlowState = namedtuple('GradientFlowState', ['B', 's', 'z'])
def gradient_flow(loss_fn, init_params, inputs, labels, t_final,
                  rtol=1.4e-8, atol=1.4e-8, mxstep=jnp.inf):
    """Public entry point: integrate gradient descent on `loss_fn` as an ODE
    from `init_params` up to time `t_final` and return the final parameters.

    The solver tolerances (`rtol`, `atol`) and step cap (`mxstep`) are passed
    through to the underlying `_gradient_flow` implementation, which carries
    the custom VJP rules.
    """
    return _gradient_flow(
        loss_fn, rtol, atol, mxstep, init_params, inputs, labels, t_final
    )
@partial(custom_vjp, nondiff_argnums=(0, 1, 2, 3))
def _gradient_flow(loss_fn, rtol, atol, mxstep, init_params, inputs, labels, t_final):
def _dynamics(params, _):
grads, _ = grad(loss_fn, has_aux=True)(params, inputs, labels)
return -grads
trajectory = ode.odeint(
jit(_dynamics), init_params,
jnp.asarray([0., t_final], dtype=jnp.float32),
rtol=rtol, atol=atol, mxstep=mxstep
)
return trajectory[-1]
def _gradient_flow_fwd(loss_fn, rtol, atol, mxstep, init_params, inputs, labels, t_final):
    """Forward rule for the custom VJP of `_gradient_flow`.

    Instead of integrating the raw parameters, this integrates an augmented
    state (B, s, z) from which both the final parameters and the quantities
    needed by the backward pass can be reconstructed.

    Returns:
        (final_params, residuals) where residuals are consumed by
        `_gradient_flow_bwd`.
    """
    # M = number of training inputs, N = parameter dimension.
    M, N = inputs.shape[0], init_params.shape[0]
    # Gram matrix of the inputs; reused in every dynamics evaluation.
    gram = jnp.dot(inputs, inputs.T)
    init_logits = jnp.matmul(inputs, init_params.T)
    diag_indices = jnp.diag_indices(M)
    # Diagonal over the first and third axes of z (shape (M, M, M, N)).
    diag_indices_interlaced = (diag_indices[0], slice(None), diag_indices[1])
    def _dynamics(state, _):
        # NOTE(review): this assumes a softmax/cross-entropy-style loss —
        # preds are softmax logits shifted by the accumulated s — confirm
        # loss_fn matches this structure.
        preds = nn.softmax(init_logits - jnp.matmul(gram, state.s), axis=-1)
        # A[i] is the softmax Jacobian diag(p) - p p^T for sample i, / M.
        A = (vmap(jnp.diag)(preds) - vmap(jnp.outer)(preds, preds)) / M
        # Update of B
        # NOTE(review): jax.ops.index_add was removed in newer JAX releases
        # (the .at[...].add(...) API replaces it) — confirm the pinned jax
        # version still provides it.
        cross_prod = jnp.einsum('ikn,im,mjnl->ijkl', A, gram, state.B)
        dB = ops.index_add(-cross_prod, diag_indices, A,
                           indices_are_sorted=True, unique_indices=True)
        # Update of s
        ds = (preds - labels) / M
        # Update of z
        cross_prod = jnp.einsum('iln,ik,kmjn->imjl', A, gram, state.z)
        As = jnp.einsum('ikl,ml->imk', A, state.s)
        dz = ops.index_add(cross_prod, diag_indices, As,
                           indices_are_sorted=True, unique_indices=True)
        dz = ops.index_add(dz, diag_indices_interlaced, As,
                           indices_are_sorted=True, unique_indices=True)
        return GradientFlowState(B=dB, s=ds, z=-dz)
    # Augmented state starts at zero; shapes fix the meaning of (B, s, z).
    init_state = GradientFlowState(
        B=jnp.zeros((M, M, N, N)),
        s=jnp.zeros((M, N)),
        z=jnp.zeros((M, M, M, N))
    )
    trajectory = ode.odeint(
        jit(_dynamics), init_state,
        jnp.asarray([0., t_final], dtype=jnp.float32),
        rtol=rtol, atol=atol, mxstep=mxstep
    )
    # Keep only the state at t_final (odeint returns one entry per time).
    final_state = tree_util.tree_map(lambda x: x[-1], trajectory)
    # Final parameters are recovered from the accumulated s.
    final_params = init_params - jnp.matmul(final_state.s.T, inputs)
    return final_params, (init_params, inputs, labels, final_state, final_params)
def _gradient_flow_bwd(loss_fn, rtol, atol, mxstep, res, grads_test):
    """Backward rule for the custom VJP of `_gradient_flow`.

    Assembles vector-Jacobian products for (init_params, inputs, labels,
    t_final) from the augmented state produced by the forward pass.
    `grads_test` is the cotangent w.r.t. the returned final parameters.
    """
    init_params, inputs, labels, state, params = res
    grads_train, _ = grad(loss_fn, has_aux=True)(params, inputs, labels)
    # Projections
    inputs_grads_test = jnp.matmul(inputs, grads_test.T)
    C = jnp.einsum('ik,ijkl->jl', inputs_grads_test, state.B)
    grads_params = grads_test - jnp.matmul(C.T, inputs)
    D = jnp.einsum('ik,imjk->jm', inputs_grads_test, state.z)
    grads_inputs = -(jnp.matmul(state.s, grads_test)
                     + jnp.matmul(C, init_params) + jnp.matmul(D, inputs))
    # d(final loss)/d(t_final) is minus the inner product of the training
    # gradient with the cotangent (chain rule through dp/dt = -grad).
    grads_t_final = -jnp.vdot(grads_train, grads_test)
    # labels receive no gradient (None); order matches the differentiable args.
    return (grads_params, grads_inputs, None, grads_t_final)
# Register the forward/backward rules on the custom_vjp primitive.
_gradient_flow.defvjp(_gradient_flow_fwd, _gradient_flow_bwd)
|
nilq/baby-python
|
python
|
"""
Crie um programa que aprove um emprestimo bancário, onde o programa leia:
Valor da Casa / salário da pessoa / quantos anos será o pagamento
Calcule o valor da prestação mensal, sabendo que ela não pode ser superior a 30% da renda da pessoa, se passar o
emprestimo será negado
"""
import time
valor_casa = float(input('Valor do imóvel que deseja comprar: '))
salario = float(input('Qual o salário do pagador: '))
anos_pagamento = int(input('Quantos anos para pagar: '))
meses_pagamento = int(input('Quantos meses para pagamento: '))
tempo_pagamento = anos_pagamento * 12 + meses_pagamento
prestacao = valor_casa / tempo_pagamento
print('\nValor do imóvel de R$ {:.2f}, salário R$ {:.2f}, tempo do emprestimo de {} meses.\n'.format(valor_casa, salario, tempo_pagamento))
time.sleep(3)
if prestacao > salario * 0.3:
print('Infelizmente o empréstimo não pode ser concedido, a prestação supera {}{}{} da renda mensal.'.format('\033[36m', '30%', '\033[m'))
else:
print('Podemos conceder o empréstimo para o senhor!!!')
print('A parte da renda que será comprometida é de {}{:.1%}{}.'.format('\033[31m', (prestacao/salario), '\033[m'))
|
nilq/baby-python
|
python
|
"""Core module for own metrics implementation"""
from sklearn.metrics import mean_squared_error
import numpy as np
def rmse(y, y_pred):
    """Root-mean-squared error between targets `y` and predictions `y_pred`."""
    mse = mean_squared_error(y, y_pred)
    return np.sqrt(mse)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import Ballot, Candidate, SubElection, Election, Image, ElectionUser
# Inline editor so candidates can be managed directly on the sub-election page.
class CandidateAdmin(admin.StackedInline):
    model = Candidate
    extra = 0  # do not render empty extra candidate forms by default
class SubElectionAdmin(admin.ModelAdmin):
    """Admin page for sub-elections with their candidates edited inline."""
    model = SubElection
    inlines = [
        CandidateAdmin,
    ]
    # Allow filtering the sub-election list by parent election.
    list_filter = ('election',)
# Register the remaining models with the default admin interface;
# SubElection gets its customised admin class.
admin.site.register(Ballot)
admin.site.register(SubElection, SubElectionAdmin)
admin.site.register(Election)
admin.site.register(Image)
admin.site.register(ElectionUser)
|
nilq/baby-python
|
python
|
""" Defines the Note repository """
from models import Note
class NoteRepository:
    """ The repository for the note model """

    @staticmethod
    def get(user_first_name, user_last_name, movie):
        """ Query a note by last and first name of the user and the movie's title"""
        return Note.query.filter_by(
            user_first_name=user_first_name,
            user_last_name=user_last_name,
            movie=movie,
        ).one()

    # CONSISTENCY FIX: update was the only instance method in an otherwise
    # fully static repository; made static (still callable on instances).
    @staticmethod
    def update(user_first_name, user_last_name, movie, note):
        """ Update a note """
        notation = NoteRepository.get(user_first_name, user_last_name, movie)
        notation.note = note
        return notation.save()

    @staticmethod
    def create(user_first_name, user_last_name, movie, note):
        """ Create a new note """
        notation = Note(
            user_first_name=user_first_name,
            user_last_name=user_last_name,
            movie=movie,
            note=note,
        )
        return notation.save()
class NoteAllRepository:
    """ Read-only repository returning every note attached to a movie """
    @staticmethod
    def get(movie):
        """ Query all notes for the given movie title """
        return Note.query.filter_by(movie=movie).all()
|
nilq/baby-python
|
python
|
# Configuration constants for the 14ID beamline auto-enable shutter.
# prefix looks like an EPICS process-variable name — TODO confirm consumer.
prefix = '14IDA:shutter_auto_enable2'
description = 'Shutter 14IDC auto'
# Target value for the PV; presumably 0.0 means "disabled" — verify.
target = 0.0
|
nilq/baby-python
|
python
|
"""Pipeline subclass for all multiclass classification pipelines."""
from evalml.pipelines.classification_pipeline import ClassificationPipeline
from evalml.problem_types import ProblemTypes
class MulticlassClassificationPipeline(ClassificationPipeline):
    """Pipeline subclass for all multiclass classification pipelines.
    Args:
        component_graph (ComponentGraph, list, dict): ComponentGraph instance, list of components in order, or dictionary of components.
            Accepts strings or ComponentBase subclasses in the list.
            Note that when duplicate components are specified in a list, the duplicate component names will be modified with the
            component's index in the list. For example, the component graph
            [Imputer, One Hot Encoder, Imputer, Logistic Regression Classifier] will have names
            ["Imputer", "One Hot Encoder", "Imputer_2", "Logistic Regression Classifier"]
        parameters (dict): Dictionary with component names as keys and dictionary of that component's parameters as values.
             An empty dictionary or None implies using all default values for component parameters. Defaults to None.
        custom_name (str): Custom name for the pipeline. Defaults to None.
        random_seed (int): Seed for the random number generator. Defaults to 0.
    Example:
        >>> pipeline = MulticlassClassificationPipeline(component_graph=["Simple Imputer", "Logistic Regression Classifier"],
        ...                                             parameters={"Logistic Regression Classifier": {"penalty": "elasticnet",
        ...                                                                                            "solver": "liblinear"}},
        ...                                             custom_name="My Multiclass Pipeline")
        ...
        >>> assert pipeline.custom_name == "My Multiclass Pipeline"
        >>> assert pipeline.component_graph.component_dict.keys() == {'Simple Imputer', 'Logistic Regression Classifier'}
        The pipeline parameters will be chosen from the default parameters for every component, unless specific parameters
        were passed in as they were above.
        >>> assert pipeline.parameters == {
        ...     'Simple Imputer': {'impute_strategy': 'most_frequent', 'fill_value': None},
        ...     'Logistic Regression Classifier': {'penalty': 'elasticnet',
        ...                                        'C': 1.0,
        ...                                        'n_jobs': -1,
        ...                                        'multi_class': 'auto',
        ...                                        'solver': 'liblinear'}}
    """
    # Fixed problem type distinguishing this subclass from the binary variant.
    problem_type = ProblemTypes.MULTICLASS
    """ProblemTypes.MULTICLASS"""
|
nilq/baby-python
|
python
|
import os
import sys
import time
import random
import string
import datetime
import concurrent.futures
# Import function from module
from .program_supplementals import enter_key_only, exception_translator
# Import function from 3rd party module
from netmiko import ConnectHandler
def file_output(ssh_results, ssh_success, ssh_failed):
    """Write each SSH session's output to its own text file and print a summary.

    Args:
        ssh_results: list of (host, output_text) tuples
        ssh_success: number of hosts configured successfully
        ssh_failed: number of hosts that failed
    """
    # Get the current path of the running Python file
    current_path = os.path.dirname(os.path.realpath(__file__))
    # BUG FIX: the original recursed into itself on any exception (risking
    # RecursionError on repeated failures) and used a bare `except:` which
    # also swallowed KeyboardInterrupt/SystemExit. Retry with a loop and
    # catch only Exception instead.
    while True:
        # Prompt the user for the output directory; blank selects the default.
        target_path = input("\nEnter the target path or leave it blank to set the default path [" + current_path + "]: ")
        if target_path == "":
            target_path = "Malas_SSH_outputs"
        try:
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(target_path, exist_ok=True)
            for ssh_result in ssh_results:
                # Unique key prevents outputs from clobbering each other when
                # the same host is written twice within the same minute.
                unique_key = "".join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
                # Get the current date and time
                present = datetime.datetime.now().strftime("_on_%Y-%m-%d_at_%H.%M")
                # Merge target path with the file name and its extension
                complete_path = os.path.join(target_path, ssh_result[0] + present + "_[" + unique_key + "].txt")
                with open(complete_path, "w") as file:
                    # Write the SSH outputs to the file
                    file.write("%s" % ssh_result[1])
            # SSH attempt results
            print("\nSSH remote configuration success: " + str(ssh_success) + " host(s)")
            print("SSH remote configuration failed: " + str(ssh_failed) + " host(s)")
            # target_path is the default directory name
            if target_path == "Malas_SSH_outputs":
                print("\nPASS: The SSH output file(s) are stored in the path \'" + current_path + "\' inside the directory \'" + target_path + "\' successfully")
            # target_path is user-defined
            else:
                print("\nPASS: The SSH output file(s) are stored in the path \'" + target_path + "\' successfully")
            print("EXIT: Please review the SSH output file(s) to confirm the configured configuration, thank you!")
            return
        except Exception:
            # Execute exception_translator
            exception_explained = exception_translator()
            # Print the raised exception error messages values
            print("\nFAIL: " + exception_explained[0] + ":\n" + exception_explained[1])
def thread_processor(threads):
    """Collect SSH thread results, tally successes/failures, and (on user
    confirmation) hand the results to file_output."""
    ssh_results = []
    ssh_success = 0
    ssh_failed = 0
    for thread in threads:
        host, outcome = thread.result()
        # Failed attempts are marked by a (error name, explanation) tuple.
        if isinstance(outcome, tuple):
            ssh_results.append((host, "FAIL: " + outcome[0] + "\n\n" + outcome[1]))
            ssh_failed += 1
        else:
            ssh_results.append((host, outcome))
            ssh_success += 1
    try:
        # Ask the user whether to write the output files.
        print("\nPress \'Enter\' to create the SSH output file(s) or \'CTRL+C\' to end the program", end = "", flush = True)
        enter_key_only()
        file_output(ssh_results, ssh_success, ssh_failed)
    except KeyboardInterrupt:
        # User aborted: print the summary and leave.
        print("\n\nSSH remote configuration success: " + str(ssh_success) + " host(s)")
        print("SSH remote configuration failed: " + str(ssh_failed) + " host(s)")
        print("\nEXIT: Please review the SSH outputs to confirm the configured configuration, thank you!")
        sys.exit()
def output_processor(output, command, stopwatch):
    """Post-process raw SSH output and report the elapsed time.

    Args:
        output: raw text returned by the remote host
        command: netmiko call type, "send_command" or "send_config_set"
        stopwatch: time.time() value captured when the SSH attempt started

    Returns:
        (final_output, ssh_processed): the processed text and a quoted
        elapsed-time string such as "'1.23' secs".
    """
    # Remote configuration stopwatch end
    ssh_processed = "\'%.2f\'" % (time.time() - stopwatch) + " secs"
    # Process the output according to its command type
    if command == "send_command":
        # No output process
        final_output = output
    elif command == "send_config_set":
        # Split output into a list
        disintegrate_output = output.split("\n")
        # Remove the unnecessary lines (config-mode enter/exit echoes)
        final_output = "\n".join(disintegrate_output[1:-1])
    else:
        # BUG FIX: an unknown command type previously left final_output
        # unbound and raised UnboundLocalError; fall back to the raw output.
        final_output = output
    # Pass these values
    return final_output, ssh_processed
def connection_ssh(dev, cmd, gdf, ip, usr, pwd, cfg):
    """Configure one remote host over SSH and return (ip_addr, result).

    Args:
        dev: netmiko device type string
        cmd: command type, "send_command" or "send_config_set"
        gdf: netmiko global delay factor
        ip/usr/pwd: target host address and credentials (may end in newline)
        cfg: command(s) to execute on the host

    Returns:
        (ip_addr, output_text) on success, or (ip_addr, (error_name,
        explanation)) on failure — callers distinguish the two by the
        tuple-ness of the second element.
    """
    # Strip newline at the end of device type, command type, IP address, username, and password
    device = dev.rstrip("\n")
    command = cmd.rstrip("\n")
    ip_addr = ip.rstrip("\n")
    username = usr.rstrip("\n")
    password = pwd.rstrip("\n")
    try:
        # Remote configuration stopwatch start
        stopwatch = time.time()
        # Define the device type, the credential information, and the delay value to log in to the remote host
        session = {
            "device_type": device,
            "host": ip_addr,
            "username": username,
            "password": password,
            "global_delay_factor": gdf
        }
        # SSH to the remote host
        remote = ConnectHandler(**session)
        # Execute every command in the configuration file according to its command type
        if command == "send_command":
            output = remote.send_command(cfg)
            # Execute output_processor and retrive values
            final_output, ssh_processed = output_processor(output, command, stopwatch)
        elif command == "send_config_set":
            output = remote.send_config_set(cfg)
            # Execute output_processor and retrive values
            final_output, ssh_processed = output_processor(output, command, stopwatch)
        # Output's bracket and print the output
        print("\n\n \ Remote host \'" + ip_addr + "\' processed for " + ssh_processed + "\n \___________________________________________________________________\n\n" + final_output, end="")
        # Pass values to threading result
        return ip_addr, final_output
    # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
    # deliberate here so a failing host never kills the worker thread.
    except:
        # Execute exception_translator
        exception_explained = exception_translator()
        # Output's bracket and print the output
        print("\n\n \ Remote host \'" + ip_addr + "\' failed to configure\n \___________________________________________________________________\n\nFAIL: " + exception_explained[0] + "\n\n" + exception_explained[1], end = "")
        # Pass values to threading result
        return ip_addr, exception_explained
def connection_futures(device, command, delay, ip_addr_list, username_list, password_list, command_list):
    """Fan connection_ssh out over all hosts with a thread pool, then hand the
    finished futures to thread_processor.

    Args:
        device: netmiko device type for all hosts
        command: "send_command" or "send_config_set"
        delay: netmiko global delay factor
        ip_addr_list/username_list/password_list: per-host credentials
        command_list: configuration command(s) to run on every host
    """
    # Execute connection_ssh. Progress dot with threading capability
    print("\nConcurrently configuring per", min(32, os.cpu_count() + 4), "hosts. Please wait", end = "", flush = True)
    # SSH-threading stopwatch start
    threading_start = time.time()
    # BUG FIX: the original did `sys.stderr = os.devnull`, assigning the
    # STRING '/dev/null' to stderr — any subsequent stderr write would raise
    # AttributeError. Open the null device as a real file object instead,
    # and restore/close it in a finally block so stderr is never left broken.
    devnull = open(os.devnull, "w")
    sys.stderr = devnull
    try:
        # SSH-threading process
        with concurrent.futures.ThreadPoolExecutor() as executor:
            # Initial variables
            threads = []
            ssh_attempts = 0
            # Loop for every IP address, username, and password in the list
            for ip_addr, username, password in zip(ip_addr_list, username_list, password_list):
                # Increment of SSH attempts
                ssh_attempts += 1
                # Execute configuration over SSH for every IP address, username, and password in the list concurrently
                threads.append(executor.submit(connection_ssh, dev = device, cmd = command, gdf = delay, ip = ip_addr, usr = username, pwd = password, cfg = command_list))
                # Progress dot
                print(".", end = "", flush = True)
    finally:
        # Unsuppress raised exception error messages outputs
        sys.stderr = sys.__stderr__
        devnull.close()
    print("\n\n \ Completed")
    print(" \___________________________________________________________________\n")
    # SSH attempt results and ping-threading stopwatch end
    print("SSH-threading for " + str(ssh_attempts) + " host(s) processed for:", "%.2f" % (time.time() - threading_start), "secs")
    # Execute thread_processor
    thread_processor(threads)
|
nilq/baby-python
|
python
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import os
import json
import random
try:
# python <= 2.7
TYPE_TEXT_STRING = (str, unicode)
except NameError:
TYPE_TEXT_STRING = (str, )
try:
from unittest import mock
from unittest.mock import Mock
except ImportError:
# python < 3.3
import mock
from mock import Mock
from azure.core.exceptions import (
HttpResponseError,
ResourceNotFoundError,
ClientAuthenticationError,
ServiceResponseError
)
from azure.cognitiveservices.inkrecognizer import (
InkStrokeKind,
InkRecognitionUnitKind,
ShapeKind,
InkPointUnit,
ApplicationKind,
ServiceVersion
)
from azure.cognitiveservices.inkrecognizer import InkRecognizerClient
from azure.cognitiveservices.inkrecognizer import (
Point,
Rectangle,
InkRecognitionUnit,
InkBullet,
InkDrawing,
Line,
Paragraph,
InkWord,
WritingRegion,
ListItem,
InkRecognitionRoot
)
# Fill URL and CREDENTIAL with live service values to enable the online tests;
# with the defaults below, @online_test-decorated tests are skipped silently
# (or raise, if RAISE_ONLINE_TEST_ERRORS is flipped to True).
RAISE_ONLINE_TEST_ERRORS = False
URL = ""
CREDENTIAL = Mock(name="FakeCredential", get_token="token")
def online_test(func):
    """Decorator that runs *func* only when URL/CREDENTIAL point at a live
    service; otherwise it skips (or raises, when RAISE_ONLINE_TEST_ERRORS)."""
    def wrapper(*args, **kw):
        offline = URL == "" or isinstance(CREDENTIAL, Mock)
        if not offline:
            return func(*args, **kw)
        if RAISE_ONLINE_TEST_ERRORS:
            raise ValueError("Please fill URL and CREDENTIAL before running online tests.")
        # Silently skip the test when no live endpoint is configured.
        return None
    return wrapper
# Stand-in for azure.core.pipeline.Pipeline.run: echoes the parsed request
# body, the headers and all kwargs back through `http_response` so tests can
# inspect exactly what would have been sent over the wire.
def fake_run(self, request, **kwargs):
    return Mock(http_response=(json.loads(request.data), kwargs["headers"], kwargs))
def pass_response(response, config):
    """Identity replacement for the client's result parser: hand the raw
    response straight back, ignoring *config*."""
    return response
def parse_result(result_filename):
    """Feed a canned JSON payload from test_data/ through the client's
    recognition path and return the parsed InkRecognitionRoot."""
    json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data", result_filename)
    client = InkRecognizerClient(URL, CREDENTIAL)
    with open(json_path, "r") as f:
        raw_recognition_result = f.read()
    # Fake a 200 HTTP response whose body is the canned recognition JSON.
    response = Mock(status_code=200, headers={}, body=lambda: raw_recognition_result.encode("utf-8"))
    with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
        root = client.recognize_ink([])
    return root
class TestClient:
    """Offline unit tests for InkRecognizerClient: argument plumbing,
    request construction and HTTP response handling (no live service)."""
    def test_set_azure_general_arguments(self):
        """Azure-wide kwargs (logging, retries) must reach the PipelineClient config."""
        def pipeline_client_checker(base_url, transport, config):
            assert base_url == URL
            assert config.logging_policy.enable_http_logger is True
            assert config.retry_policy.total_retries == 3
            from azure.core.pipeline.transport import HttpTransport
            assert isinstance(transport, HttpTransport)
        def fake_pipeline_client_constructor(*args, **kw):
            pipeline_client_checker(kw["base_url"], kw["transport"], kw["config"])
        # Intercept PipelineClient construction to inspect what the client passes in.
        with mock.patch("azure.core.PipelineClient.__init__", fake_pipeline_client_constructor):
            InkRecognizerClient(URL,
                                CREDENTIAL,
                                logging_enable=True,
                                retry_total=3)
    def test_set_ink_recognizer_arguments(self):
        """Constructor-level recognizer options must appear in the request body."""
        client = InkRecognizerClient(URL,
                                     CREDENTIAL,
                                     application_kind=ApplicationKind.DRAWING,
                                     ink_point_unit=InkPointUnit.INCH,
                                     language="zh-cn",
                                     unit_multiple=2.5)
        with mock.patch.object(client, "_parse_result", pass_response):
            with mock.patch("azure.core.pipeline.Pipeline.run", fake_run):
                request_json, headers, kwargs = client.recognize_ink([])
                # check ink recognizer arguments
                assert request_json["applicationType"] == ApplicationKind.DRAWING.value
                assert request_json["unit"] == InkPointUnit.INCH.value
                assert request_json["language"] == "zh-cn"
                assert request_json["unitMultiple"] == 2.5
    def test_set_arguments_in_request(self):
        """Per-call kwargs must override constructor defaults and reach headers/kwargs."""
        client = InkRecognizerClient(URL,
                                     CREDENTIAL,
                                     application_kind=ApplicationKind.DRAWING,
                                     language="zh-cn")
        with mock.patch.object(client, "_parse_result", pass_response):
            with mock.patch("azure.core.pipeline.Pipeline.run", fake_run):
                request_json, headers, kwargs = client.recognize_ink(
                    [],
                    application_kind=ApplicationKind.WRITING,
                    language = "en-gb",
                    client_request_id="random_id",
                    headers={"test_header": "test_header_result"},
                    timeout=10,
                    total_retries=5)
                # check ink recognizer arguments
                assert request_json["applicationType"] == ApplicationKind.WRITING.value
                assert request_json["language"] == "en-gb"
                # check azure general arguments
                assert headers["test_header"] == "test_header_result"
                assert headers["x-ms-client-request-id"] == "random_id"
                assert kwargs["connection_timeout"] == 10
                assert kwargs["total_retries"] == 5
    def test_consume_ink_stroke_list(self):
        """Ink strokes handed to recognize_ink must be serialized faithfully."""
        point = Mock(x=0, y=0)
        stroke = Mock(id=0, points=[point], language="python", kind=InkStrokeKind.DRAWING)
        ink_stroke_list = [stroke] * 3
        client = InkRecognizerClient(URL, CREDENTIAL)
        with mock.patch.object(client, "_parse_result", pass_response):
            with mock.patch("azure.core.pipeline.Pipeline.run", fake_run):
                request_json, headers, kwargs = client.recognize_ink(ink_stroke_list)
                # check number of strokes, point values and other features
                assert len(request_json["strokes"]) == 3
                for s in request_json["strokes"]:
                    assert len(s["points"]) == 1
                    assert s["points"][0]["x"] == 0
                    assert s["points"][0]["y"] == 0
                    assert s["id"] == 0
                    assert s["language"] == "python"
                    assert s["kind"] == InkStrokeKind.DRAWING.value
    def test_parse_http_response(self):
        """HTTP status codes and malformed bodies must map to the right azure-core exceptions."""
        client = InkRecognizerClient(URL, CREDENTIAL)
        # 401: ClientAuthenticationError
        response = Mock(status_code=401, headers={}, body=lambda: "HTTP STATUS: 401".encode("utf-8"))
        with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
            try:
                root = client.recognize_ink([])
            except ClientAuthenticationError:
                pass  # expected
            else:
                raise AssertionError("Should raise ClientAuthenticationError here")
        # 404: ResourceNotFoundError
        response = Mock(status_code=404, headers={}, body=lambda: "HTTP STATUS: 404".encode("utf-8"))
        with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
            try:
                root = client.recognize_ink([])
            except ResourceNotFoundError:
                pass  # expected
            else:
                raise AssertionError("Should raise ResourceNotFoundError here")
        # valid response from server
        json_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "test_data", "hello_world_result.json")
        with open(json_path, "r") as f:
            recognition_json = f.read()
        response = Mock(status_code=200, headers={}, body=lambda: recognition_json.encode("utf-8"))
        with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
            root = client.recognize_ink([])  # should pass. No need to check result.
        # invalid response from server
        jobj = json.loads(recognition_json)
        jobj["recognitionUnits"].append("random_string")
        invalid_recognition_json = json.dumps(jobj)
        response = Mock(status_code=200, headers={}, body=lambda: invalid_recognition_json.encode("utf-8"))
        with mock.patch.object(client, "_send_request", lambda *args, **kw: response):
            try:
                root = client.recognize_ink([])
            except ServiceResponseError:
                pass  # expected
            else:
                raise AssertionError("Should raise ServiceResponseError here")
class TestModels:
    """Offline checks of the parsed recognition-result object model.

    Each test loads a canned JSON fixture via ``parse_result`` and verifies
    that the resulting unit tree exposes the expected attribute types and
    parent/child relationships. No network access is involved.
    """
    def test_unit_ink_recognition_unit(self):
        """Every unit exposes the generic InkRecognitionUnit attributes with the right types."""
        root = parse_result("hello_world_result.json")
        units = root._units
        assert len(units) > 0
        for unit in units:
            assert isinstance(unit.id, int)
            assert isinstance(unit.bounding_box, Rectangle)
            assert isinstance(unit.rotated_bounding_box, list)
            assert isinstance(unit.stroke_ids, list)
            assert isinstance(unit.children, list)
            # parent is either another unit or the tree root
            assert isinstance(unit.parent, (InkRecognitionUnit, InkRecognitionRoot))
            for point in unit.rotated_bounding_box:
                assert isinstance(point, Point)
            for stroke_id in unit.stroke_ids:
                assert isinstance(stroke_id, int)
            for child in unit.children:
                assert isinstance(child, InkRecognitionUnit)
    def test_unit_ink_bullet(self):
        """Bullets hang off a Line parent and are leaves (no children)."""
        root = parse_result("list_result.json")
        bullets = root.ink_bullets
        assert len(bullets) > 0
        for bullet in bullets:
            assert bullet.kind == InkRecognitionUnitKind.INK_BULLET
            assert isinstance(bullet.recognized_text, TYPE_TEXT_STRING)
            assert isinstance(bullet.parent, Line)
            assert len(bullet.children) == 0
    def test_unit_ink_drawing(self):
        """Drawings carry geometry, confidence, shape kind and one level of alternates."""
        root = parse_result("drawings_result.json")
        drawings = root.ink_drawings
        assert len(drawings) > 0
        for drawing in drawings:
            assert drawing.kind == InkRecognitionUnitKind.INK_DRAWING
            assert isinstance(drawing.center, Point)
            assert isinstance(drawing.confidence, (int, float))
            assert isinstance(drawing.recognized_shape, ShapeKind)
            assert isinstance(drawing.rotated_angle, (int, float))
            assert isinstance(drawing.points, list)
            assert isinstance(drawing.alternates, list)
            for point in drawing.points:
                assert isinstance(point, Point)
            for alt in drawing.alternates:
                assert isinstance(alt, InkDrawing)
                # alternates must not recurse: an alternate has no alternates
                assert alt.alternates == []
            assert isinstance(drawing.parent, InkRecognitionRoot)
            assert len(drawing.children) == 0
    def test_unit_line(self):
        """Lines have text, text alternates, a Paragraph/ListItem parent and bullet/word children."""
        root = parse_result("hello_world_result.json")
        lines = root.lines
        assert len(lines) > 0
        for line in lines:
            assert line.kind == InkRecognitionUnitKind.LINE
            assert isinstance(line.recognized_text, TYPE_TEXT_STRING)
            assert isinstance(line.alternates, list)
            for alt in line.alternates:
                assert isinstance(alt, TYPE_TEXT_STRING)
            assert isinstance(line.parent, (Paragraph, ListItem))
            for child in line.children:
                assert isinstance(child, (InkBullet, InkWord))
    def test_unit_paragraph(self):
        """Paragraphs sit under a WritingRegion and contain Lines or ListItems."""
        root = parse_result("list_result.json")
        paragraphs = root.paragraphs
        assert len(paragraphs) > 0
        for paragraph in paragraphs:
            assert paragraph.kind == InkRecognitionUnitKind.PARAGRAPH
            assert isinstance(paragraph.recognized_text, TYPE_TEXT_STRING)
            assert isinstance(paragraph.parent, WritingRegion)
            for child in paragraph.children:
                assert isinstance(child, (Line, ListItem))
    def test_unit_ink_word(self):
        """Words have text and alternates, a Line parent, and are leaves."""
        root = parse_result("hello_world_result.json")
        words = root.ink_words
        assert len(words) > 0
        for word in words:
            assert word.kind == InkRecognitionUnitKind.INK_WORD
            assert isinstance(word.recognized_text, TYPE_TEXT_STRING)
            assert isinstance(word.alternates, list)
            for alt in word.alternates:
                assert isinstance(alt, TYPE_TEXT_STRING)
            assert isinstance(word.parent, Line)
            assert len(word.children) == 0
    def test_unit_writing_region(self):
        """Writing regions are children of the root and contain Paragraphs."""
        root = parse_result("list_result.json")
        writing_regions = root.writing_regions
        assert len(writing_regions) > 0
        for writing_region in writing_regions:
            assert writing_region.kind == InkRecognitionUnitKind.WRITING_REGION
            assert isinstance(writing_region.recognized_text, TYPE_TEXT_STRING)
            assert isinstance(writing_region.parent, InkRecognitionRoot)
            for child in writing_region.children:
                assert isinstance(child, Paragraph)
    def test_unit_list_item(self):
        """List items sit under a Paragraph and contain Lines."""
        root = parse_result("list_result.json")
        list_items = root.list_items
        assert len(list_items) > 0
        for list_item in list_items:
            assert list_item.kind == InkRecognitionUnitKind.LIST_ITEM
            assert isinstance(list_item.recognized_text, TYPE_TEXT_STRING)
            assert isinstance(list_item.parent, Paragraph)
            for child in list_item.children:
                assert isinstance(child, Line)
class TestSendRequests:
    """Round-trip tests against the live service; only run when the
    ``online_test`` decorator enables them (requires URL/CREDENTIAL)."""
    @online_test
    def test_recognize_ink_with_empty_ink_stroke_list(self):
        """An empty stroke list must recognize to a root with no words, drawings or bullets."""
        client = InkRecognizerClient(URL, CREDENTIAL)
        root = client.recognize_ink([])
        words = root.ink_words
        assert not words
        drawings = root.ink_drawings
        assert not drawings
        bullets = root.ink_bullets
        assert not bullets
    @online_test
    def test_recognize_ink(self):
        """A single 10-point diagonal stroke should produce at least one recognized unit."""
        points = []
        for i in range(10):
            points.append(Mock(x=i, y=i))
        # NOTE(review): the stroke is created once, after the loop, so its id is
        # the final loop value (9) — fine for a single-stroke request.
        stroke = Mock(id=i, points=points, language="en-US")
        ink_stroke_list = [stroke]
        client = InkRecognizerClient(URL, CREDENTIAL)
        root = client.recognize_ink(ink_stroke_list)
        words = root.ink_words
        drawings = root.ink_drawings
        bullets = root.ink_bullets
        assert len(words) + len(drawings) + len(bullets) > 0
|
nilq/baby-python
|
python
|
"""
Module containing character class for use
within world.
"""
from abc import ABC
from .. import entity
class Character(entity.Entity):
    """Abstract base for a character that inhabits a world.

    Carries no behavior of its own; it exists so concrete character types
    share a common ancestor distinct from other entities.
    """
# Import-only module: nothing to execute when run as a script.
if __name__ == "__main__":
    pass
|
nilq/baby-python
|
python
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Convert raw PASCAL dataset to TFRecord for object_detection.
Example usage:
python object_detection/dataset_tools/create_pascal_tf_record.py \
--data_dir=/home/user/VOCdevkit \
--output_dir=/home/user
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import io
import logging
import os
from lxml import etree
import PIL.Image
import tensorflow as tf
import glob
import random
import dataset_util
import xml.etree.ElementTree as ET
# Command-line flags, parsed by tf.app.run() before main() is invoked.
flags = tf.app.flags
flags.DEFINE_string(
    'data_dir', '', 'Root directory to raw PASCAL VOC dataset.')
flags.DEFINE_string('images_dir', 'images',
                    'Name of images directory.')
flags.DEFINE_string('annotations_dir', 'xml',
                    'Name of annotations directory.')
flags.DEFINE_string('output_dir', '', 'Path to output TFRecord')
# flags.DEFINE_integer(
#     'ratio', '7', 'Ratio to split data to train set and val set. Default is train 7/ val 3')
flags.DEFINE_boolean('ignore_difficult_instances', False, 'Whether to ignore '
                     'difficult instances')
FLAGS = flags.FLAGS
def dict_to_tf_example(data,
                       image_path,
                       label_map_dict,
                       ignore_difficult_instances=False,
                       image_subdirectory='images'):
  """Convert XML derived dict to tf.Example proto.

  Notice that this function normalizes the bounding box coordinates provided
  by the raw data.

  Args:
    data: dict holding PASCAL XML fields for a single image (obtained by
      running dataset_util.recursive_parse_xml_to_dict)
    image_path: Full path to image file
    label_map_dict: A map from string label names to integers ids. Objects
      whose label is not in the map are silently skipped.
    ignore_difficult_instances: Whether to skip difficult instances in the
      dataset (default: False).
    image_subdirectory: Unused; kept for interface compatibility.

  Returns:
    example: The converted tf.Example.

  Raises:
    ValueError: if the image pointed to by image_path is not a valid JPEG
  """
  full_path = image_path
  with tf.gfile.GFile(full_path, 'rb') as fid:
    encoded_jpg = fid.read()
  encoded_jpg_io = io.BytesIO(encoded_jpg)
  image = PIL.Image.open(encoded_jpg_io)
  if image.format != 'JPEG':
    raise ValueError('Image format not JPEG')
  key = hashlib.sha256(encoded_jpg).hexdigest()
  # Image size comes from the annotation, not from the decoded image.
  width = int(data['size']['width'])
  height = int(data['size']['height'])
  # FIX: was full_path.split('/')[-1], which breaks on Windows-style paths;
  # os.path.basename handles the platform separator.
  filename = os.path.basename(full_path)
  xmin = []
  ymin = []
  xmax = []
  ymax = []
  classes = []
  classes_text = []
  truncated = []
  poses = []  # never populated (pose field intentionally dropped below)
  difficult_obj = []
  if 'object' in data:
    for obj in data['object']:
      # 'difficult' is not read from the XML here; every object is kept.
      difficult = False  # bool(int(obj['difficult']))
      if ignore_difficult_instances and difficult:
        continue
      # Skip objects with labels outside the label map.
      if obj['name'] not in label_map_dict:
        continue
      difficult_obj.append(int(difficult))
      # Normalize box coordinates to [0, 1] relative to image size.
      xmin.append(float(obj['bndbox']['xmin']) / width)
      ymin.append(float(obj['bndbox']['ymin']) / height)
      xmax.append(float(obj['bndbox']['xmax']) / width)
      ymax.append(float(obj['bndbox']['ymax']) / height)
      classes_text.append(obj['name'].encode('utf8'))
      classes.append(label_map_dict[obj['name']])
      # 'truncated' is not read from the XML; recorded as 0 for every object.
      truncated.append(0)
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': dataset_util.int64_feature(height),
      'image/width': dataset_util.int64_feature(width),
      'image/filename': dataset_util.bytes_feature(
          filename.encode('utf8')),
      'image/source_id': dataset_util.bytes_feature(
          filename.encode('utf8')),
      'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
      'image/encoded': dataset_util.bytes_feature(encoded_jpg),
      'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
      'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
      'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
      'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
      'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
      'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
      'image/object/class/label': dataset_util.int64_list_feature(classes),
      'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
      'image/object/truncated': dataset_util.int64_list_feature(truncated),
      'image/object/view': dataset_util.bytes_list_feature(poses),
  }))
  return example
def background_tf_example(
        image_path,
):
  """Build a tf.Example for a background image (no annotated objects).

  All object-level feature lists are intentionally empty so the example
  contributes only negative (background) signal during training.

  Args:
    image_path: Full path to image file

  Returns:
    example: The converted tf.Example.

  Raises:
    ValueError: if the image is not a valid JPEG
  """
  full_path = image_path
  with tf.gfile.GFile(full_path, 'rb') as fid:
    encoded_jpg = fid.read()
  encoded_jpg_io = io.BytesIO(encoded_jpg)
  image = PIL.Image.open(encoded_jpg_io)
  if image.format != 'JPEG':
    raise ValueError('Image format not JPEG')
  key = hashlib.sha256(encoded_jpg).hexdigest()
  # FIX: was full_path.split('/')[-1], which breaks on Windows-style paths.
  filename = os.path.basename(full_path)
  # No XML annotation here, so the size is taken from the decoded image.
  width = image.width
  height = image.height
  xmin = []
  ymin = []
  xmax = []
  ymax = []
  classes = []
  classes_text = []
  truncated = []
  poses = []
  difficult_obj = []
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': dataset_util.int64_feature(height),
      'image/width': dataset_util.int64_feature(width),
      'image/filename': dataset_util.bytes_feature(
          filename.encode('utf8')),
      'image/source_id': dataset_util.bytes_feature(
          filename.encode('utf8')),
      'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
      'image/encoded': dataset_util.bytes_feature(encoded_jpg),
      'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
      'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
      'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
      'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
      'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
      'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
      'image/object/class/label': dataset_util.int64_list_feature(classes),
      'image/object/difficult': dataset_util.int64_list_feature(difficult_obj),
      'image/object/truncated': dataset_util.int64_list_feature(truncated),
      'image/object/view': dataset_util.bytes_list_feature(poses),
  }))
  return example
def create_tf_record(images_path, output_path, images_dir_name='images', annotation_dir_name='xml'):
    """Write one TFRecord containing every image in ``images_path``.

    Images with a matching XML annotation (same relative path under
    ``annotation_dir_name``, .xml extension) are written as annotated
    examples; images without one are written as background (object-free)
    examples.

    Args:
      images_path: list of full paths to .jpg files.
      output_path: destination .record file.
      images_dir_name: directory-name component replaced to locate XMLs.
      annotation_dir_name: directory name holding the XML annotations.
    """
    label_map_dict = {'person': 1, 'face': 2, 'potted plant': 3, 'tvmonitor': 4, 'chair': 5, 'microwave': 6, 'refrigerator': 7, 'book': 8, 'clock': 9, 'vase': 10, 'dining table': 11, 'bear': 12, 'bed': 13, 'stop sign': 14, 'truck': 15, 'car': 16, 'teddy bear': 17, 'skis': 18, 'oven': 19, 'sports ball': 20, 'baseball glove': 21, 'tennis racket': 22, 'handbag': 23, 'backpack': 24, 'bird': 25, 'boat': 26, 'cell phone': 27, 'train': 28, 'sandwich': 29, 'bowl': 30, 'surfboard': 31, 'laptop': 32, 'mouse': 33, 'keyboard': 34, 'bus': 35, 'cat': 36, 'airplane': 37, 'zebra': 38, 'tie': 39, 'traffic light': 40, 'apple': 41, 'baseball bat': 42, 'knife': 43, 'cake': 44, 'wine glass': 45, 'cup': 46, 'spoon': 47, 'banana': 48, 'donut': 49, 'sink': 50, 'toilet': 51, 'broccoli': 52, 'skateboard': 53, 'fork': 54, 'carrot': 55, 'couch': 56, 'remote': 57, 'scissors': 58, 'bicycle': 59, 'sheep': 60, 'bench': 61, 'bottle': 62, 'orange': 63, 'elephant': 64, 'motorcycle': 65, 'horse': 66, 'hot dog': 67, 'frisbee': 68, 'umbrella': 69, 'dog': 70, 'kite': 71, 'pizza': 72, 'fire hydrant': 73, 'suitcase': 74, 'cow': 75, 'giraffe': 76, 'snowboard': 77, 'parking meter': 78, 'toothbrush': 79, 'toaster': 80, 'hair drier': 81, 'pottedplant': 82, 'sofa': 83, 'diningtable': 84, 'motorbike': 85, 'aeroplane': 86}
    logging.info('Creating {}'.format(output_path))
    writer = tf.python_io.TFRecordWriter(output_path)
    for idx in range(len(images_path)):
        if idx % 100 == 0:
            logging.info('On image %d of %d', idx, len(images_path))
        image_path = images_path[idx]
        xml_path = image_path.replace(
            '/{}/'.format(images_dir_name), '/{}/'.format(annotation_dir_name))
        xml_path = xml_path.replace('.jpg', '.xml')
        if os.path.exists(xml_path):
            tree = ET.parse(xml_path)
            xml = tree.getroot()
            data = dataset_util.recursive_parse_xml_to_dict(xml)['annotation']
            tf_example = dict_to_tf_example(data, image_path, label_map_dict)
        else:
            # BUG FIX: previously the background example was written *in
            # addition to* the annotated one for every image that had an XML
            # (contradictory labels for the same image), while images without
            # an XML were skipped entirely via `continue`. Background examples
            # belong to the un-annotated images only.
            tf_example = background_tf_example(image_path)
        writer.write(tf_example.SerializeToString())
    writer.close()
def main(_):
    """Entry point: collect images under FLAGS.data_dir, shuffle them
    deterministically and write a single <set_name>.record file."""
    data_dir = FLAGS.data_dir
    # load list image files and xml files
    images_dir = os.path.join(data_dir, FLAGS.images_dir)
    print(data_dir)
    print(images_dir)
    images_path = glob.glob(os.path.join(images_dir, '*.jpg'))
    # Fixed seed keeps the record ordering reproducible across runs.
    random.seed(42)
    random.shuffle(images_path)
    # BUG FIX: for a trailing-separator path, os.path.split() returns
    # (parent, '') and the old code indexed [-2], yielding the *full parent
    # path*; os.path.join(output_dir, <absolute path>.record) then discarded
    # output_dir entirely. normpath+basename extracts the final directory
    # component in both cases.
    set_name = os.path.basename(os.path.normpath(data_dir))
    print("dataset contain: {} images".format(len(images_path)))
    tfrecord_path = os.path.join(FLAGS.output_dir, '{}.record'.format(set_name))
    print('saved data at: ', tfrecord_path)
    create_tf_record(images_path, tfrecord_path, images_dir_name=FLAGS.images_dir, annotation_dir_name=FLAGS.annotations_dir)
if __name__ == '__main__':
    logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.INFO)
    tf.app.run()  # parses the flags above, then dispatches to main()
|
nilq/baby-python
|
python
|
import unittest
from pygments import lexers, token
from gviewer.util import pygmentize, _join
class TestUtil(unittest.TestCase):
    """Unit tests for gviewer.util's pygments helpers."""
    def test_pygmentize(self):
        """pygmentize splits highlighted source into one token-list per line."""
        python_content = """
import unittest
class Pygmentize(object):
    pass"""
        result = pygmentize(python_content, lexers.PythonLexer())
        # four source lines -> four rendered lines
        self.assertEqual(len(result), 4)
        self.assertIn(
            (token.Token.Keyword.Namespace, u'import'),
            result[0])
        self.assertIn(
            (token.Token.Name.Namespace, u'unittest'),
            result[0])
        # the blank line between import and class renders as empty
        self.assertEqual(result[1], u"")
        self.assertIn(
            (token.Token.Keyword, u'class'),
            result[2])
        self.assertIn(
            (token.Token.Name.Class, u'Pygmentize'),
            result[2])
        self.assertIn(
            (token.Token.Keyword, u'pass'),
            result[3])
    def test_join(self):
        """_join flattens markup fragments into a single joined line."""
        result = _join([("aaa", "bbb"), ("ccc", "ddd")], "\n")
        self.assertEqual(len(result), 1)
        self.assertEqual(
            result[0], [("aaa", "bbb"), ("ccc", "ddd")])
|
nilq/baby-python
|
python
|
import json
import unittest
from contextlib import contextmanager
@contextmanager
def mock_stderr():
    """Temporarily replace sys.stderr with an in-memory buffer.

    Yields the buffer so callers can inspect what was written; the real
    stderr is always restored on exit, even if the body raises.
    """
    # BUG FIX: cStringIO only exists on Python 2; fall back to io.StringIO
    # so the helper also works on Python 3.
    try:
        from cStringIO import StringIO
    except ImportError:
        from io import StringIO
    import sys
    _stderr = sys.stderr
    sys.stderr = StringIO()
    try:
        yield sys.stderr
    finally:
        sys.stderr = _stderr
class RegressionIssue109(unittest.TestCase):
    """
    logging prints text and traceback to stderr. Then, code in `utils.py` can
    not parse output from daemon.py and there are a lot of messages in the ST
    console with `Non JSON data from daemon`.
    Should be tested:
    1. content in stderr should be JSON valid
    2. content should contain correct data
    """
    def test_json_formatter_works_on_jedi_expections(self):
        # Capture stderr so the daemon's error log can be inspected as JSON.
        with mock_stderr() as stderr_mock:
            from daemon import JediFacade  # load class here to mock stderr
            JediFacade('print "hello"', 1, 1).get('some')
        stderr_content = json.loads(stderr_mock.getvalue())
        self.assertEqual(stderr_content['logging'], 'error')
        self.assertIn('Traceback (most recent call last):',
                      stderr_content['content'])
        self.assertIn('JediFacade instance has no attribute \'get_some\'',
                      stderr_content['content'])
# Allow running this regression test module directly.
if __name__ == '__main__':
    unittest.main()
|
nilq/baby-python
|
python
|
'''
Skip-thought vectors
'''
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import zip
from builtins import range
from past.utils import old_div
import os
import theano
import theano.tensor as tensor
import pickle as pkl
import numpy
import copy
import nltk
from collections import OrderedDict, defaultdict
from scipy.linalg import norm
from nltk.tokenize import word_tokenize
# When True, theano.scan emits profiling information (debug aid).
profile = False
#-----------------------------------------------------------------------------#
# Specify model and table locations here
#-----------------------------------------------------------------------------#
path_to_models = 'models/'
path_to_tables = 'models/'
#-----------------------------------------------------------------------------#
# Pre-trained skip-thought weights: uni-directional and bi-directional GRUs.
path_to_umodel = path_to_models + 'uni_skip.npz'
path_to_bmodel = path_to_models + 'bi_skip.npz'
def load_model():
    """
    Load the model with saved tables.

    Reads the pickled options for both the uni- and bi-directional models,
    loads their parameters, compiles the two Theano encoder functions and
    loads the word-embedding tables.

    Returns a dict with keys: 'uoptions', 'boptions', 'utable', 'btable',
    'f_w2v' (uni encoder fn), 'f_w2v2' (bi encoder fn).
    """
    # Load model options (pickled alongside each .npz weight archive)
    print('Loading model parameters...')
    with open('%s.pkl'%path_to_umodel, 'rb') as f:
        uoptions = pkl.load(f)
    with open('%s.pkl'%path_to_bmodel, 'rb') as f:
        boptions = pkl.load(f)
    # Load parameters
    uparams = init_params(uoptions)
    uparams = load_params(path_to_umodel, uparams)
    utparams = init_tparams(uparams)
    bparams = init_params_bi(boptions)
    bparams = load_params(path_to_bmodel, bparams)
    btparams = init_tparams(bparams)
    # Extractor functions (compiling can take a while)
    print('Compiling encoders...')
    embedding, x_mask, ctxw2v = build_encoder(utparams, uoptions)
    f_w2v = theano.function([embedding, x_mask], ctxw2v, name='f_w2v')
    embedding, x_mask, ctxw2v = build_encoder_bi(btparams, boptions)
    f_w2v2 = theano.function([embedding, x_mask], ctxw2v, name='f_w2v2')
    # Tables
    print('Loading tables...')
    utable, btable = load_tables()
    # Store everything we need in a dictionary
    print('Packing up...')
    model = {}
    model['uoptions'] = uoptions
    model['boptions'] = boptions
    model['utable'] = utable
    model['btable'] = btable
    model['f_w2v'] = f_w2v
    model['f_w2v2'] = f_w2v2
    return model
def load_tables():
    """
    Load the word-embedding lookup tables.

    Returns (utable, btable): OrderedDicts mapping each word from
    dictionary.txt to its uni-/bi-directional embedding row.
    """
    words = []
    utable = numpy.load(path_to_tables + 'utable.npy', fix_imports=True, encoding='bytes')
    btable = numpy.load(path_to_tables + 'btable.npy', fix_imports=True, encoding='bytes')
    # FIX: use a context manager so the dictionary file is closed even if a
    # decode error is raised mid-read (the explicit close() leaked on error).
    with open(path_to_tables + 'dictionary.txt', 'rb') as f:
        for line in f:
            words.append(line.decode('utf-8').strip())
    # Row order in the .npy tables matches line order in dictionary.txt.
    utable = OrderedDict(list(zip(words, utable)))
    btable = OrderedDict(list(zip(words, btable)))
    return utable, btable
def encode(model, X, use_norm=True, verbose=True, batch_size=128, use_eos=False):
    """
    Encode sentences in the list X. Each entry will return a vector.

    Args:
        model: dict from load_model() (options, tables, compiled encoders).
        X: list of raw sentence strings.
        use_norm: L2-normalize each feature vector.
        verbose: print each sentence-length bucket as it is processed.
        batch_size: approximate minibatch size within a length bucket.
        use_eos: append the '<eos>' embedding to every sentence.

    Returns:
        numpy array of shape (len(X), uni_dim + 2*bi_dim), float32 —
        uni-skip and bi-skip features concatenated column-wise.
    """
    # first, do preprocessing
    X = preprocess(X)
    # word dictionary and init: d flags which words are in-vocabulary
    d = defaultdict(lambda : 0)
    for w in list(model['utable'].keys()):
        d[w] = 1
    ufeatures = numpy.zeros((len(X), model['uoptions']['dim']), dtype='float32')
    bfeatures = numpy.zeros((len(X), 2 * model['boptions']['dim']), dtype='float32')
    # length dictionary: bucket sentence indices by token count
    ds = defaultdict(list)
    captions = [s.split() for s in X]
    for i,s in enumerate(captions):
        ds[len(s)].append(i)
    # Get features. This encodes by length, in order to avoid wasting computation
    for k in list(ds.keys()):
        if verbose:
            print(k)
        numbatches = old_div(len(ds[k]), batch_size) + 1
        for minibatch in range(numbatches):
            # strided slice partitions the bucket into numbatches groups
            caps = ds[k][minibatch::numbatches]
            # time-major embedding tensors: (length[, +eos], batch, dim_word)
            if use_eos:
                uembedding = numpy.zeros((k+1, len(caps), model['uoptions']['dim_word']), dtype='float32')
                bembedding = numpy.zeros((k+1, len(caps), model['boptions']['dim_word']), dtype='float32')
            else:
                uembedding = numpy.zeros((k, len(caps), model['uoptions']['dim_word']), dtype='float32')
                bembedding = numpy.zeros((k, len(caps), model['boptions']['dim_word']), dtype='float32')
            for ind, c in enumerate(caps):
                caption = captions[c]
                for j in range(len(caption)):
                    if d[caption[j]] > 0:
                        uembedding[j,ind] = model['utable'][caption[j]]
                        bembedding[j,ind] = model['btable'][caption[j]]
                    else:
                        # out-of-vocabulary words map to the UNK embedding
                        uembedding[j,ind] = model['utable']['UNK']
                        bembedding[j,ind] = model['btable']['UNK']
                if use_eos:
                    uembedding[-1,ind] = model['utable']['<eos>']
                    bembedding[-1,ind] = model['btable']['<eos>']
            # NOTE: `caption` here is the last caption of the group, but all
            # captions in this bucket share length k, so the mask shape is
            # correct for the whole minibatch.
            if use_eos:
                uff = model['f_w2v'](uembedding, numpy.ones((len(caption)+1,len(caps)), dtype='float32'))
                bff = model['f_w2v2'](bembedding, numpy.ones((len(caption)+1,len(caps)), dtype='float32'))
            else:
                uff = model['f_w2v'](uembedding, numpy.ones((len(caption),len(caps)), dtype='float32'))
                bff = model['f_w2v2'](bembedding, numpy.ones((len(caption),len(caps)), dtype='float32'))
            if use_norm:
                for j in range(len(uff)):
                    uff[j] /= norm(uff[j])
                    bff[j] /= norm(bff[j])
            # scatter batch results back to the original sentence positions
            for ind, c in enumerate(caps):
                ufeatures[c] = uff[ind]
                bfeatures[c] = bff[ind]
    features = numpy.c_[ufeatures, bfeatures]
    return features
def preprocess(text):
    """
    Preprocess text for the encoder: sentence-split each passage with the
    Punkt tokenizer, word-tokenize every sentence, and rejoin the tokens
    with single spaces (each sentence contributes a leading space).
    """
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    processed = []
    for passage in text:
        pieces = []
        for sentence in sent_detector.tokenize(passage):
            pieces.append(' ' + ' '.join(word_tokenize(sentence)))
        processed.append(''.join(pieces))
    return processed
def nn(model, text, vectors, query, k=5):
    """
    Print the k nearest neighbour sentences to `query`.
    text: list of sentences
    vectors: the corresponding representations for text
    query: a string to search
    """
    query_vec = encode(model, [query])
    query_vec /= norm(query_vec)
    scores = numpy.dot(query_vec, vectors.T).flatten()
    ranked = numpy.argsort(scores)[::-1]
    nearest = [text[idx] for idx in ranked[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for rank, sentence in enumerate(nearest):
        print(sentence, ranked[rank])
def word_features(table):
    """
    Extract word features into a normalized matrix.

    NOTE: the embedding width (620) is hard-coded to match the pre-trained
    skip-thought tables.
    """
    feats = numpy.zeros((len(table), 620), dtype='float32')
    for row, key in enumerate(list(table.keys())):
        vec = table[key]
        feats[row] = old_div(vec, norm(vec))
    return feats
def nn_words(table, wordvecs, query, k=10):
    """
    Print the k nearest-neighbour words to `query`, ranked by dot-product
    similarity against the rows of `wordvecs`.
    """
    keys = list(table.keys())
    query_vec = table[query]
    scores = numpy.dot(query_vec, wordvecs.T).flatten()
    ranked = numpy.argsort(scores)[::-1]
    neighbours = [keys[idx] for idx in ranked[:k]]
    print('QUERY: ' + query)
    print('NEAREST: ')
    for word in neighbours:
        print(word)
def _p(pp, name):
"""
make prefix-appended name
"""
return '%s_%s'%(pp, name)
def init_tparams(params):
    """
    Wrap every parameter array in a Theano shared variable, preserving
    insertion order and naming each shared variable after its key.
    """
    shared = OrderedDict()
    for name, value in params.items():
        shared[name] = theano.shared(value, name=name)
    return shared
def load_params(path, params):
    """
    Overwrite entries of `params` with the arrays stored in the archive at
    `path`. Keys present in `params` but absent from the archive are left
    untouched (a warning is emitted). Returns the same `params` dict.
    """
    import warnings  # FIX: `warnings` was used below without ever being imported
    pp = numpy.load(path)
    for kk in params:
        if kk not in pp:
            warnings.warn('%s is not in the archive'%kk)
            continue
        params[kk] = pp[kk]
    return params
# layers: 'name': ('parameter initializer', 'feedforward')
layers = {'gru': ('param_init_gru', 'gru_layer')}
def get_layer(name):
    """Resolve a layer name to its (init_fn, feedforward_fn) pair."""
    init_name, step_name = layers[name]
    return (eval(init_name), eval(step_name))
def init_params(options):
    """
    Initialize all parameters needed for the unidirectional encoder:
    the word-embedding matrix plus one GRU layer.
    """
    params = OrderedDict()
    # embedding
    params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
    # encoder: GRU
    encoder_init = get_layer(options['encoder'])[0]
    params = encoder_init(options, params, prefix='encoder',
                          nin=options['dim_word'], dim=options['dim'])
    return params
def init_params_bi(options):
    """
    Initialize all parameters needed for the bidirectional encoder:
    the word-embedding matrix plus forward and reverse GRU layers.
    """
    params = OrderedDict()
    # embedding
    params['Wemb'] = norm_weight(options['n_words_src'], options['dim_word'])
    # encoder: forward and reverse GRUs share the same shape
    encoder_init = get_layer(options['encoder'])[0]
    for gru_prefix in ('encoder', 'encoder_r'):
        params = encoder_init(options, params, prefix=gru_prefix,
                              nin=options['dim_word'], dim=options['dim'])
    return params
def build_encoder(tparams, options):
    """
    Build the unidirectional encoder graph over pre-computed word embeddings.
    Returns (embedding input, mask input, context output): the context is the
    GRU hidden state at the final timestep.
    """
    # symbolic inputs: time-major embedding tensor and its mask
    embedding = tensor.tensor3('embedding', dtype='float32')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    # run the GRU feedforward over the sequence
    encoder_step = get_layer(options['encoder'])[1]
    proj = encoder_step(tparams, embedding, options,
                        prefix='encoder', mask=x_mask)
    ctx = proj[0][-1]
    return embedding, x_mask, ctx
def build_encoder_bi(tparams, options):
    """
    Build the bidirectional encoder graph over pre-computed word embeddings.
    Returns (embedding input, mask input, context output): the context is the
    concatenation of the final forward and backward GRU states.
    """
    # symbolic inputs; the reverse pass sees the time-reversed sequence/mask
    embedding = tensor.tensor3('embedding', dtype='float32')
    x_mask = tensor.matrix('x_mask', dtype='float32')
    embeddingr = embedding[::-1]
    xr_mask = x_mask[::-1]
    encoder_step = get_layer(options['encoder'])[1]
    proj = encoder_step(tparams, embedding, options,
                        prefix='encoder', mask=x_mask)
    projr = encoder_step(tparams, embeddingr, options,
                         prefix='encoder_r', mask=xr_mask)
    ctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)
    return embedding, x_mask, ctx
# some utilities
def ortho_weight(ndim):
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.1, ortho=True):
    """Sample a float32 weight matrix of shape (nin, nout).

    When nout is omitted it defaults to nin. Square matrices are drawn
    orthogonal (via ortho_weight) when `ortho` is set; otherwise entries are
    uniform in [-scale, scale).
    """
    if nout is None:  # FIX: identity check, not `== None`
        nout = nin
    if nout == nin and ortho:
        W = ortho_weight(nin)
    else:
        W = numpy.random.uniform(low=-scale, high=scale, size=(nin, nout))
    return W.astype('float32')
def param_init_gru(options, params, prefix='gru', nin=None, dim=None):
    """
    Parameter init for a GRU layer.

    Adds to `params` (under `prefix`): W/b for the concatenated reset+update
    gates, recurrent U, and Wx/Ux/bx for the candidate state. Returns the
    same dict.
    """
    if nin is None:  # FIX: identity check, not `== None`
        nin = options['dim_proj']
    if dim is None:  # FIX: identity check, not `== None`
        dim = options['dim_proj']
    # gate weights: reset and update stacked side by side
    W = numpy.concatenate([norm_weight(nin,dim),
                           norm_weight(nin,dim)], axis=1)
    params[_p(prefix,'W')] = W
    params[_p(prefix,'b')] = numpy.zeros((2 * dim,)).astype('float32')
    U = numpy.concatenate([ortho_weight(dim),
                           ortho_weight(dim)], axis=1)
    params[_p(prefix,'U')] = U
    # candidate-state weights
    Wx = norm_weight(nin, dim)
    params[_p(prefix,'Wx')] = Wx
    Ux = ortho_weight(dim)
    params[_p(prefix,'Ux')] = Ux
    params[_p(prefix,'bx')] = numpy.zeros((dim,)).astype('float32')
    return params
def gru_layer(tparams, state_below, options, prefix='gru', mask=None, **kwargs):
    """
    Forward pass through a GRU layer.

    Args:
        tparams: shared-variable dict produced by init_tparams.
        state_below: time-major input, 3-D (steps, samples, nin) or 2-D.
        mask: (steps, samples) float mask; defaults to all-ones.
    Returns a one-element list holding the scan output (hidden states per step).
    """
    nsteps = state_below.shape[0]
    if state_below.ndim == 3:
        n_samples = state_below.shape[1]
    else:
        n_samples = 1
    dim = tparams[_p(prefix,'Ux')].shape[1]
    # FIX: `mask == None` builds a symbolic comparison when mask is a Theano
    # tensor; `is None` is the intended presence check.
    if mask is None:
        mask = tensor.alloc(1., state_below.shape[0], 1)
    def _slice(_x, n, dim):
        # pick the n-th dim-wide column block (reset vs update gate)
        if _x.ndim == 3:
            return _x[:, :, n*dim:(n+1)*dim]
        return _x[:, n*dim:(n+1)*dim]
    # precompute input projections for gates (W) and candidate (Wx)
    state_below_ = tensor.dot(state_below, tparams[_p(prefix, 'W')]) + tparams[_p(prefix, 'b')]
    state_belowx = tensor.dot(state_below, tparams[_p(prefix, 'Wx')]) + tparams[_p(prefix, 'bx')]
    U = tparams[_p(prefix, 'U')]
    Ux = tparams[_p(prefix, 'Ux')]
    def _step_slice(m_, x_, xx_, h_, U, Ux):
        # one GRU step: r/u gates, candidate h, then mask-gated update
        preact = tensor.dot(h_, U)
        preact += x_
        r = tensor.nnet.sigmoid(_slice(preact, 0, dim))
        u = tensor.nnet.sigmoid(_slice(preact, 1, dim))
        preactx = tensor.dot(h_, Ux)
        preactx = preactx * r
        preactx = preactx + xx_
        h = tensor.tanh(preactx)
        h = u * h_ + (1. - u) * h
        # masked positions keep the previous hidden state
        h = m_[:,None] * h + (1. - m_)[:,None] * h_
        return h
    seqs = [mask, state_below_, state_belowx]
    _step = _step_slice
    rval, updates = theano.scan(_step,
                                sequences=seqs,
                                outputs_info = [tensor.alloc(0., n_samples, dim)],
                                non_sequences = [tparams[_p(prefix, 'U')],
                                                 tparams[_p(prefix, 'Ux')]],
                                name=_p(prefix, '_layers'),
                                n_steps=nsteps,
                                profile=profile,
                                strict=True)
    rval = [rval]
    return rval
|
nilq/baby-python
|
python
|
#!/bin/env python
##
# @file This file is part of the ExaHyPE project.
# @author ExaHyPE Group (exahype@lists.lrz.de)
#
# @section LICENSE
#
# Copyright (c) 2016 http://exahype.eu
# All rights reserved.
#
# The project has received funding from the European Union's Horizon
# 2020 research and innovation programme under grant agreement
# No 671698. For copyrights and licensing, please consult the webpage.
#
# Released under the BSD 3 Open Source License.
# For the full license text, see LICENSE.txt
#
#
# @section DESCRIPTION
#
# Controller of the code generator
#
# @note
# requires python3
import os
import copy
import subprocess
import errno
import time
from .configuration import Configuration
from .argumentParser import ArgumentParser
from .models import *
class Controller:
"""Main Controller
Read the input from the public API, validate them and generate a base
context for the models.
Use generateCode() to run the models with the base context.
Can generate gemms with generateGemms(outputFile, matmulconfig), will be done
automatically when using generateCode().
"""
    def __init__(self, inputConfig = None):
        """Initialize the base config from the command line inputs.

        When `inputConfig` is None the arguments are parsed from the command
        line; otherwise the given dict is validated and used directly.
        """
        Configuration.checkPythonVersion()
        if inputConfig == None:
            args = ArgumentParser.parseArgs()
        else:
            ArgumentParser.validateInputConfig(inputConfig)
            args = inputConfig
        # Keep an equivalent command line around for reproducibility/logging.
        self.commandLine = ArgumentParser.buildCommandLineFromConfig(args)
        # Generate the base config from the args input
        self.config = {
            "numerics"              : args["numerics"],
            "pathToOptKernel"       : args["pathToOptKernel"],
            "solverName"            : args["solverName"],
            "nVar"                  : args["numberOfVariables"],
            "nPar"                  : args["numberOfParameters"],
            "nData"                 : args["numberOfVariables"] + args["numberOfParameters"],
            "nDof"                  : (args["order"])+1,
            "nDim"                  : args["dimension"],
            "useFlux"               : (args["useFlux"] or args["useFluxVect"]),
            "useFluxVect"           : args["useFluxVect"],
            "useNCP"                : (args["useNCP"] or args["useNCPVect"]),
            "useNCPVect"            : args["useNCPVect"],
            "useSource"             : (args["useSource"] or args["useSourceVect"] or args["useFusedSource"] or args["useFusedSourceVect"]),
            "useSourceVect"         : args["useSourceVect"],
            "useFusedSource"        : (args["useFusedSource"] or args["useFusedSourceVect"]),
            "useFusedSourceVect"    : args["useFusedSourceVect"],
            "nPointSources"         : args["usePointSources"],
            "usePointSources"       : args["usePointSources"] >= 0,
            "useMaterialParam"      : (args["useMaterialParam"] or args["useMaterialParamVect"]),
            "useMaterialParamVect"  : args["useMaterialParamVect"],
            "codeNamespace"         : args["namespace"],
            "pathToOutputDirectory" : os.path.join(Configuration.pathToExaHyPERoot, args["pathToApplication"], args["pathToOptKernel"]),
            "architecture"          : args["architecture"],
            "useLimiter"            : args["useLimiter"] >= 0,
            "nObs"                  : args["useLimiter"],
            "ghostLayerWidth"       : args["ghostLayerWidth"],
            "pathToLibxsmmGemmGenerator"  : Configuration.pathToLibxsmmGemmGenerator,
            "quadratureType"        : ("Gauss-Lobatto" if args["useGaussLobatto"] else "Gauss-Legendre"),
            "useCERKGuess"          : args["useCERKGuess"],
            "useSplitCKScalar"      : args["useSplitCKScalar"],
            "useSplitCKVect"        : args["useSplitCKVect"],
            "tempVarsOnStack"       : args["tempVarsOnStack"],
            "useLibxsmm"            : Configuration.useLibxsmm,
            "runtimeDebug"          : Configuration.runtimeDebug #for debug
        }
        self.config["useSourceOrNCP"] = self.config["useSource"] or self.config["useNCP"]
        self.validateConfig(Configuration.simdWidth.keys())
        self.config["vectSize"] = Configuration.simdWidth[self.config["architecture"]] #only initialize once architecture has been validated
        self.baseContext = self.generateBaseContext() # default context build from config
        self.gemmList = [] #list to store the name of all generated gemms (used for gemmsCPPModel)
def validateConfig(self, validArchitectures):
"""Ensure the configuration fit some constraint, raise errors if not"""
if not (self.config["architecture"] in validArchitectures):
raise ValueError("Architecture not recognized. Available architecture: "+str(validArchitectures))
if not (self.config["numerics"] == "linear" or self.config["numerics"] == "nonlinear"):
raise ValueError("numerics has to be linear or nonlinear")
if self.config["nVar"] < 0:
raise ValueError("Number of variables must be >=0 ")
if self.config["nPar"] < 0:
raise ValueError("Number of parameters must be >= 0")
if self.config["nDim"] < 2 or self.config["nDim"] > 3:
raise ValueError("Number of dimensions must be 2 or 3")
if self.config["nDof"] < 1 or self.config["nDof"] > 10: #nDof = order+1
raise ValueError("Order has to be between 0 and 9")
#if (self.config["useSource"] and not self.config["useSourceVect"] and self.config["useNCPVect"]) or (self.config["useNCP"] and not self.config["useNCPVect"] and self.config["useSourceVect"]) :
# raise ValueError("If using source and NCP, both or neither must be vectorized")
    def printConfig(self):
        """Print the raw configuration dictionary (debugging aid)."""
        print(self.config)
def generateBaseContext(self):
"""Generate a base context for the models from the config (use hard copy)"""
context = copy.copy(self.config)
context["nVarPad"] = self.getSizeWithPadding(context["nVar"])
context["nParPad"] = self.getSizeWithPadding(context["nPar"])
context["nDataPad"] = self.getSizeWithPadding(context["nData"])
context["nDofPad"] = self.getSizeWithPadding(context["nDof"])
context["nDof3D"] = 1 if context["nDim"] == 2 else context["nDof"]
context["isLinear"] = context["numerics"] == "linear"
context["solverHeader"] = context["solverName"].split("::")[1] + ".h"
context["codeNamespaceList"] = context["codeNamespace"].split("::")
context["guardNamespace"] = "_".join(context["codeNamespaceList"]).upper()
context["nDofLim"] = 2*context["nDof"]-1 #for limiter
context["nDofLimPad"] = self.getSizeWithPadding(context["nDofLim"])
context["nDofLim3D"] = 1 if context["nDim"] == 2 else context["nDofLim"]
context["ghostLayerWidth3D"] = 0 if context["nDim"] == 2 else context["ghostLayerWidth"]
context["useVectPDEs"] = context["useFluxVect"] or True #TODO JMG add other vect
return context
def getSizeWithPadding(self, sizeWithoutPadding):
"""Return the size of the input with the architecture specific padding added"""
return self.config["vectSize"] * int((sizeWithoutPadding+(self.config["vectSize"]-1))/self.config["vectSize"])
def getPadSize(self, sizeWithoutPadding):
"""Return the size of padding required for its input"""
return self.getSizeWithPadding(sizeWithoutPadding) - sizeWithoutPadding
def generateCode(self):
"""Main method: call the models to generate the code"""
# create directory for output files if not existing
try:
os.makedirs(self.config['pathToOutputDirectory'])
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# remove all .cpp, .cpph, .c and .h files (we are in append mode!)
for fileName in os.listdir(self.config['pathToOutputDirectory']):
_ , ext = os.path.splitext(fileName)
if(ext in [".cpp", ".cpph", ".c", ".h"]):
os.remove(self.config['pathToOutputDirectory'] + "/" + fileName)
# generate new files
runtimes = {}
start = time.perf_counter()
adjustSolution = adjustSolutionModel.AdjustSolutionModel(self.baseContext)
adjustSolution.generateCode()
runtimes["adjustSolution"] = time.perf_counter() - start
start = time.perf_counter()
amrRoutines = amrRoutinesModel.AMRRoutinesModel(self.baseContext, self)
amrRoutines.generateCode()
runtimes["amrRoutines"] = time.perf_counter() - start
start = time.perf_counter()
boundaryConditions = boundaryConditionsModel.BoundaryConditionsModel(self.baseContext)
boundaryConditions.generateCode()
runtimes["boundaryConditions"] = time.perf_counter() - start
start = time.perf_counter()
configurationParameters = configurationParametersModel.ConfigurationParametersModel(self.baseContext)
configurationParameters.generateCode()
runtimes["configurationParameters"] = time.perf_counter() - start
start = time.perf_counter()
converter = converterModel.ConverterModel(self.baseContext)
converter.generateCode()
runtimes["converter"] = time.perf_counter() - start
start = time.perf_counter()
deltaDistribution = deltaDistributionModel.DeltaDistributionModel(self.baseContext)
deltaDistribution.generateCode()
runtimes["deltaDistribution"] = time.perf_counter() - start
start = time.perf_counter()
dgMatrix = dgMatrixModel.DGMatrixModel(self.baseContext)
dgMatrix.generateCode()
runtimes["dgMatrix"] = time.perf_counter() - start
start = time.perf_counter()
faceIntegral = faceIntegralModel.FaceIntegralModel(self.baseContext)
faceIntegral.generateCode()
runtimes["faceIntegral"] = time.perf_counter() - start
start = time.perf_counter()
fusedSpaceTimePredictorVolumeIntegral = fusedSpaceTimePredictorVolumeIntegralModel.FusedSpaceTimePredictorVolumeIntegralModel(self.baseContext, self)
fusedSpaceTimePredictorVolumeIntegral.generateCode()
runtimes["fusedSpaceTimePredictorVolumeIntegral"] = time.perf_counter() - start
start = time.perf_counter()
kernelsHeader = kernelsHeaderModel.KernelsHeaderModel(self.baseContext)
kernelsHeader.generateCode()
runtimes["kernelsHeader"] = time.perf_counter() - start
start = time.perf_counter()
limiter = limiterModel.LimiterModel(self.baseContext, self)
limiter.generateCode()
runtimes["limiter"] = time.perf_counter() - start
start = time.perf_counter()
matrixUtils = matrixUtilsModel.MatrixUtilsModel(self.baseContext)
matrixUtils.generateCode()
runtimes["matrixUtils"] = time.perf_counter() - start
start = time.perf_counter()
quadrature = quadratureModel.QuadratureModel(self.baseContext, self)
quadrature.generateCode()
runtimes["quadrature"] = time.perf_counter() - start
start = time.perf_counter()
riemann = riemannModel.RiemannModel(self.baseContext)
riemann.generateCode()
runtimes["riemann"] = time.perf_counter() - start
start = time.perf_counter()
solutionUpdate = solutionUpdateModel.SolutionUpdateModel(self.baseContext)
solutionUpdate.generateCode()
runtimes["solutionUpdate"] = time.perf_counter() - start
start = time.perf_counter()
stableTimeStepSize = stableTimeStepSizeModel.StableTimeStepSizeModel(self.baseContext)
stableTimeStepSize.generateCode()
runtimes["stableTimeStepSize"] = time.perf_counter() - start
start = time.perf_counter()
surfaceIntegral = surfaceIntegralModel.SurfaceIntegralModel(self.baseContext)
surfaceIntegral.generateCode()
runtimes["surfaceIntegral"] = time.perf_counter() - start
# must be run only after all gemm have been generated
start = time.perf_counter()
gemmsContext = copy.copy(self.baseContext)
gemmsContext["gemmList"] = self.gemmList
gemmsCPP = gemmsCPPModel.GemmsCPPModel(gemmsContext)
gemmsCPP.generateCode()
runtimes["gemmsCPP"] = time.perf_counter() - start
if self.config['runtimeDebug']:
for key, value in runtimes.items():
print(key+": "+str(value))
def generateGemms(self, outputFileName, matmulConfigList):
"""Generate the gemms with the given config list using LIBXSMM"""
for matmul in matmulConfigList:
# add the gemm name to the list of generated gemm
self.gemmList.append(matmul.baseroutinename)
# for plain assembly code (rather than inline assembly) choose dense_asm
commandLineArguments = " " + "dense" + \
" " + os.path.join(self.config["pathToOutputDirectory"], outputFileName) + \
" " + self.config["codeNamespace"] + "::" + matmul.baseroutinename + \
" " + str(matmul.M) + \
" " + str(matmul.N) + \
" " + str(matmul.K) + \
" " + str(matmul.LDA) + \
" " + str(matmul.LDB) + \
" " + str(matmul.LDC) + \
" " + str(matmul.alpha) + \
" " + str(matmul.beta) + \
" " + str(matmul.alignment_A) + \
" " + str(matmul.alignment_C) + \
" " + self.config["architecture"] + \
" " + matmul.prefetchStrategy + \
" " + "DP" #always use double precision, "SP" for single
bashCommand = self.config["pathToLibxsmmGemmGenerator"] + commandLineArguments
subprocess.call(bashCommand.split())
|
nilq/baby-python
|
python
|
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from sklearn.impute import KNNImputer
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler
class TrainModel():
    """Helpers for assembling the preprocessing / classification pipeline and
    its hyper-parameter search space."""

    @classmethod
    def transformerFor(cls, cat_cols, num_cols):
        """Construct a column transformer for the named columns

        Please see https://jaketae.github.io/study/sklearn-pipeline/ on
        which this implementation is based.

        Args:
            cat_cols (List): Categorical column names
            num_cols (List): Numerical column names

        Returns:
            ColumnTransformer: a column transformer
        """
        # Numerical columns: KNN-impute missing values, then robust-scale.
        numeric_steps = [
            ('imputer', KNNImputer(n_neighbors=5)),
            ('scaler', RobustScaler()),
        ]
        # Categorical columns: mode-impute, one-hot encode, then reduce with PCA.
        categorical_steps = [
            ('imputer', SimpleImputer(strategy='most_frequent')),
            ('onehot', OneHotEncoder(handle_unknown='ignore', sparse=False)),
            ('pca', PCA(n_components=10)),
        ]
        return ColumnTransformer(transformers=[
            ('num', Pipeline(steps=numeric_steps), num_cols),
            ('cat', Pipeline(steps=categorical_steps), cat_cols),
        ])

    @classmethod
    def pipelineFor(cls, preprocessor, classifier):
        """Construct a pipeline for the specified preprocessor and classifier

        Args:
            preprocessor (ColumnTransformer): A column transformer
            classifier (Classifier): A model classifier

        Returns:
            Pipeline: A Pipeline suitable for classification use
        """
        steps = [('preprocessor', preprocessor), ('classifier', classifier)]
        return Pipeline(steps=steps)

    @classmethod
    def tunedParameters(cls):
        """Define search parameters

        Returns:
            Dictionary: A dictionary of key-value search parameters
        """
        search_space = {}
        # Numerical imputer knobs
        search_space['preprocessor__num__imputer__n_neighbors'] = list(range(2, 15))
        search_space['preprocessor__num__imputer__add_indicator'] = [True, False]
        # Categorical imputer / PCA knobs
        search_space['preprocessor__cat__imputer__strategy'] = ['most_frequent', 'constant']
        search_space['preprocessor__cat__imputer__add_indicator'] = [True, False]
        search_space['preprocessor__cat__pca__n_components'] = list(range(2, 15))
        # Random-forest classifier knobs
        search_space['classifier__n_estimators'] = list(range(50, 500))
        search_space['classifier__max_depth'] = list(range(2, 20))
        search_space['classifier__bootstrap'] = [True, False]
        return search_space
|
nilq/baby-python
|
python
|
import cv2
from .drawBoxes import drawBoxes
def addPedestriansToTrack(image, tracker, trackers, trackedObjectsNum):
    """Interactively mark pedestrians on *image* and add them to a multi-tracker.

    Opens a ROI-selection window repeatedly; each non-empty selection is added
    to *trackers* (created on demand) and drawn on the image.  Enter returns,
    backspace resets the tracker set, any other key selects the next object.

    Args:
        image: frame to mark pedestrians on.
        tracker: zero-argument factory returning a fresh cv2 tracker instance.
        trackers: existing cv2 MultiTracker, or None to create a new one.
        trackedObjectsNum: number of objects already being tracked.

    Returns:
        [trackers, markedObjects]: the (possibly re-created) MultiTracker and
        the updated tracked-object count.
    """
    if trackers is None:  # was `== None`; identity comparison is the correct idiom
        trackers = cv2.MultiTracker_create()
    markedObjects = trackedObjectsNum
    while True:
        manualMarking = cv2.selectROI("Mark pedestrian to track", image)
        # an all-zero tuple means the user made no selection
        if manualMarking != (0, 0, 0, 0):
            markedObjects = markedObjects + 1
            trackers.add(tracker(), image, manualMarking)
            drawBoxes(image, [manualMarking])
        print("Hit Enter to continue")
        print("Hit backspace to clear all tracked objects")
        print("Hit any other key to add next object")
        key = cv2.waitKey(0)
        cv2.destroyWindow("Mark pedestrian to track")
        if key == ord("\r"):
            return [trackers, markedObjects]
        if key == 8:  # backspace: drop every tracked object and start over
            trackers = cv2.MultiTracker_create()
            markedObjects = 0
            print("!! You clear all tracked objects !!")
|
nilq/baby-python
|
python
|
import argparse
import io
import csv
import scipy
from scipy.sparse import csr_matrix
import numpy as np
import tensorflow as tf
def add_data(r, indptr, indices, data, vocab):
    """Accumulate one libSVM row ([label, "feat:val", ...]) into CSR components.

    Each "key:value" feature is looked up (or inserted) in *vocab* to obtain a
    column index; the value is appended to *data* and the row is closed by
    appending to *indptr*.

    Args:
        r: one parsed row; r[0] is the label, the rest are "key:value" strings.
        indptr, indices, data: CSR components being built up.
        vocab: feature-name -> column-index mapping, extended in place.

    Returns:
        (label, indptr, indices, data, vocab).  label is None for rows with no
        features.  (Previously False was returned, which the caller's
        `label is not None` filter treated as a real label and appended to y.)
    """
    if len(r) > 1:
        label = r[0]
        for f in r[1:]:
            if f:  # skip empty tokens produced by repeated delimiters
                k, v = f.split(':')
                idx = vocab.setdefault(k, len(vocab))
                indices.append(idx)
                data.append(float(v))
        indptr.append(len(indices))
        return label, indptr, indices, data, vocab
    return None, indptr, indices, data, vocab
def process_file(fn, indptr, indices, data, vocab):
    """Stream a space-delimited libSVM file, accumulating CSR components.

    Delegates each row to add_data and collects the labels it reports.

    Returns:
        (labels, indptr, indices, data, vocab).
    """
    labels = []
    with io.open(fn) as fh:
        for row in csv.reader(fh, delimiter=' '):
            label, indptr, indices, data, vocab = add_data(row, indptr, indices, data, vocab)
            if label is not None:
                labels.append(label)
    return labels, indptr, indices, data, vocab
def parse(data_fn):
    """Read a libSVM-formatted file into a sorted CSR matrix and label list.

    Returns:
        (x, y): scipy.sparse.csr_matrix of float32 features and list of labels.
    """
    indptr, indices, data, vocab = [0], [], [], dict()
    y, indptr, indices, data, vocab = process_file(data_fn, indptr, indices, data, vocab)
    matrix = csr_matrix((data, indices, indptr), dtype=np.float32)
    matrix.sort_indices()
    return matrix, y
def compress(x, y, model, out_fn):
    """Run the compressor model on x and write the reduced data in libSVM format.

    Each output line is "<label> <j>:<value> ..." where j enumerates the
    reduced feature vector produced by model.predict.
    """
    reduced = model.predict(x)
    with io.open(out_fn, 'w') as fh:
        for i, row in enumerate(reduced):
            features = ' '.join('{}:{}'.format(j, v) for j, v in enumerate(row))
            fh.write('{} {}\n'.format(y[i], features))
if __name__ == '__main__':
    # CLI entry point: parse the dataset, load the trained compressor model
    # and write the dimensionality-reduced data back out in libSVM format.
    cli = argparse.ArgumentParser(description='Parses a libSVM-formatted dataset.')
    cli.add_argument('-d', '--dataset', required=True, help='Input dataset for reduction.')
    cli.add_argument('-m', '--model', required=False, help='Trained compressor model file.')
    cli.add_argument('-o', '--output', required=True, help='Output file with reduced data in libSVM format.')
    args = cli.parse_args()
    x, y = parse(args.dataset)
    model = tf.keras.models.load_model(args.model)
    compress(x, y, model, args.output)
|
nilq/baby-python
|
python
|
import importlib
import xarray as xr
import numpy as np
import pandas as pd
import sys
import os
from CASutils import filter_utils as filt
from CASutils import calendar_utils as cal
importlib.reload(filt)
importlib.reload(cal)
def calcdeseas(da):
    """Return deseasonalized DJF anomalies of a daily DataArray.

    The seasonal cycle is estimated as the day-of-year climatology smoothed
    with its first 4 harmonics; anomalies are then grouped into DJF seasons
    and the DJF-season mean is removed.
    """
    climatology = da.groupby('time.dayofyear').mean('time', skipna=True)
    smooth_clim = filt.calc_season_nharm(climatology, 4, dimtime=0)
    anomalies = da.groupby('time.dayofyear') - smooth_clim
    djf = cal.group_season_daily(anomalies, 'DJF')
    return djf - djf.mean('day', skipna=True)
# ---------------------------------------------------------------------------
# Composite ERA5 increments / forecasts / analyses of 2-m temperature over
# percentile bins for 3 cities; bins are derived from the CAM6/CLM5 run.
# ---------------------------------------------------------------------------
basepath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/CAM/"
trefht_clm5 = xr.open_dataset(basepath+"TREFHT_Isla_CAM6_CLM5_002.nc")
# deseasonalized DJF anomalies; assumed dims (year, day, city) -- TODO confirm
trefht_clm5_deseas = calcdeseas(trefht_clm5.trefht)
cities = trefht_clm5.city
ncities = trefht_clm5.city.size
for icity in range(0,ncities,1):
    # NOTE(review): `trefht_clm5` is rebound here (shadowing the dataset above)
    # and binmin/binmax are recomputed on every pass, so after this loop they
    # hold the bins of the LAST city only, which are then applied to all cities
    # below.  If per-city bins were intended they would need a city dimension
    # -- confirm with the author.
    trefht_clm5 = np.array(trefht_clm5_deseas[:,:,icity]).flatten()
    # calculate the ptile bin ranges
    nblocks = 10  # decile bins; `nblocks` deliberately leaks out of the loop for reuse below
    binmin = np.empty([nblocks]) ; binmax = np.empty([nblocks])
    for iblock in np.arange(0,nblocks,1):
        binmin[iblock] = np.percentile(trefht_clm5,iblock*10)
        binmax[iblock] = np.percentile(trefht_clm5,iblock*10+10)
        # clamp the outermost edges to the 1st/99th percentiles to limit outliers
        if (iblock == 0):
            binmin[iblock] = np.percentile(trefht_clm5,1)
        if (iblock == (nblocks-1)):
            binmax[iblock] = np.percentile(trefht_clm5,99)
outpath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/trefhtptile_composites/3cities/"
# Load the ERA5 2-m temperature and the increment/forecast/analysis fields,
# then deseasonalize each the same way as the model data above.
basepath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/OBS/"
trefht = xr.open_dataset(basepath+"ERA5_TREFHT.nc")
basepath="/project/cas/islas/python_savs/snowpaper/DATA_SORT/3cities/ERA5/"
dat = xr.open_dataset(basepath+"ERA5_increments.nc")
increments_deseas = calcdeseas(dat.increments)
forecast_deseas = calcdeseas(dat.forecast)
analysis_deseas = calcdeseas(dat.analysis)
trefht_deseas = calcdeseas(trefht.era5)
cities=dat.city
ncities = dat.city.size
for icity in range(0,ncities,1):
    # flatten each city's (year, day) anomalies into 1-D sample vectors
    trefht = np.array(trefht_deseas[:,:,icity]).flatten()
    increments = np.array(increments_deseas[:,:,icity]).flatten()
    forecast = np.array(forecast_deseas[:,:,icity]).flatten()
    analysis = np.array(analysis_deseas[:,:,icity]).flatten()
    # allocate the composite arrays once, on the first city
    if (icity == 0):
        incrementcomp = np.zeros([nblocks, ncities])
        forecastcomp = np.zeros([nblocks, ncities])
        analysiscomp = np.zeros([nblocks, ncities])
    # composite each field over the analysis-temperature percentile bins
    # (bins computed above from the CLM5 run -- last city only, see note)
    for iblock in np.arange(0,nblocks,1):
        incrementcomp[iblock, icity] = \
            (increments[(analysis >= binmin[iblock]) & (analysis < binmax[iblock])]).mean()
        forecastcomp[iblock, icity] = \
            (forecast[(analysis >= binmin[iblock]) & (analysis < binmax[iblock])]).mean()
        analysiscomp[iblock, icity] = \
            (analysis[(analysis >= binmin[iblock]) & (analysis < binmax[iblock])]).mean()
# Wrap the composites as DataArrays and write them into one NetCDF file:
# the first to_netcdf creates the file, mode='a' appends the other variables.
increment_xr = xr.DataArray(incrementcomp,
                   coords=[np.arange(0,nblocks,1),cities], dims=['ptile','city'], name='increment')
forecast_xr = xr.DataArray(forecastcomp,
                   coords=[np.arange(0,nblocks,1),cities], dims=['ptile','city'], name='forecast')
analysis_xr = xr.DataArray(analysiscomp,
                   coords=[np.arange(0,nblocks,1),cities], dims=['ptile','city'], name='analysis')
increment_xr.to_netcdf(path=outpath+'trefhtptilecomposites_3cities_ERA5increments.nc')
forecast_xr.to_netcdf(path=outpath+'trefhtptilecomposites_3cities_ERA5increments.nc', mode='a')
analysis_xr.to_netcdf(path=outpath+'trefhtptilecomposites_3cities_ERA5increments.nc', mode='a')
|
nilq/baby-python
|
python
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.