content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
print("Hello world from my VM")
| [
4798,
7203,
15496,
995,
422,
616,
16990,
4943,
198
] | 3.555556 | 9 |
from random import choice
aluno1 = input("Primeiro aluno: ")
aluno2 = input("Segundo aluno: ")
aluno3 = input("Terceiro aluno: ")
aluno4 = input("Quarto aluno: ")
alunos = [aluno1, aluno2, aluno3, aluno4]
print(f"O aluno escolhido foi {choice(alunos)}.")
| [
6738,
4738,
1330,
3572,
198,
282,
36909,
16,
796,
5128,
7203,
26405,
7058,
435,
36909,
25,
366,
8,
198,
282,
36909,
17,
796,
5128,
7203,
41030,
41204,
435,
36909,
25,
366,
8,
198,
282,
36909,
18,
796,
5128,
7203,
15156,
344,
7058,
4... | 2.383178 | 107 |
#!/usr/bin/env python3
import os
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
from torch.nn.modules.loss import _Loss
from torch.autograd import Variable
import concurrent.futures
import numpy as np
import pickle as pkl
from common import Config
from lib.utils.basic_utils import Basic_Utils
from lib.utils.meanshift_pytorch import MeanShiftTorch
config = Config(dataset_name='ycb')
bs_utils = Basic_Utils(config)
config_lm = Config(dataset_name="linemod")
bs_utils_lm = Basic_Utils(config_lm)
cls_lst = config.ycb_cls_lst
# vim: ts=4 sw=4 sts=4 expandtab
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
1845,
29363,
198,
11748,
28034,
13,
1891,
2412,
13,
... | 2.767544 | 228 |
# proxy module
from codetools.util.tree import *
| [
2,
15741,
8265,
198,
6738,
14873,
316,
10141,
13,
22602,
13,
21048,
1330,
1635,
198
] | 3.266667 | 15 |
#!/usr/bin/env python3
import queue
import threading
import time
from modules import device_tray as DC
class PollingClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, root_view, load_settings, save_settings):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI as well. We spawn a new thread for the worker (I/O).
"""
self.root_view = root_view
self.load_settings = load_settings
self.save_settings = save_settings
# Create the queue
self.queue = queue.Queue()
# Set up the GUI part
self.gui = DC.DeviceTray(self.root_view, self.queue, self.load_settings, self.save_settings, self.endApplication)
# Set up the thread to do asynchronous I/O
# More threads can also be created and used, if necessary
self.running = 1
self.poll_thread = threading.Thread(target=self.workerThread)
self.poll_thread.start()
# Start the periodic call in the GUI to check if the queue contains
# anything
self.periodicCall()
def periodicCall(self):
"""
Check every 200 ms if there is something new in the queue.
"""
self.gui.processIncoming()
if not self.running:
# This is the brutal stop of the system. You may want to do
# some cleanup before actually shutting it down.
import sys
sys.exit(1)
self.root_view.after(200, self.periodicCall)
def workerThread(self):
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select()'. One important thing to remember is that the thread has
to yield control pretty regularly, by select or otherwise.
"""
while self.running:
# To simulate asynchronous I/O, we create a random number at
# random intervals. Replace the following two lines with the real
# thing.
time.sleep(0.25)
message = 0
self.queue.put(message)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
16834,
198,
11748,
4704,
278,
198,
11748,
640,
198,
6738,
13103,
1330,
3335,
62,
2213,
323,
355,
6257,
198,
198,
4871,
12868,
278,
11792,
25,
198,
220,
220,
220,
37227,... | 2.606667 | 900 |
import bpy
from .. data_structures import Mesh
from .. base_types import AnimationNodeSocket, PythonListSocket
| [
11748,
275,
9078,
198,
6738,
11485,
1366,
62,
7249,
942,
1330,
47529,
198,
6738,
11485,
2779,
62,
19199,
1330,
23535,
19667,
39105,
11,
11361,
8053,
39105,
628
] | 4.148148 | 27 |
import pathlib
import logging
from logging import Logger
from logging.handlers import RotatingFileHandler
from photon.common.config_context_common import ConfigContextCommon
class LoggingCommon:
"""
Helper class for logging.
"""
def __init__(self, config: ConfigContextCommon) -> None:
"""
Args:
config: A config object.
"""
self._root_logger = logging.getLogger()
self._root_logger.setLevel(config.LOG_LEVEL)
self._formatter = logging.Formatter(fmt=config.LOG_FORMAT, style="{")
self._logname = config.PACKAGE_NAME
self._logdirp = (
pathlib.Path("/usr/local/var") / config.PACKAGE_FAMILY / config.PACKAGE_NAME
)
self._file_handler()
self._console_handler()
def get_root_logger(self) -> Logger:
"""
Return the root logger.
Returns:
The root logger.
"""
return self._root_logger
| [
11748,
3108,
8019,
198,
11748,
18931,
198,
6738,
18931,
1330,
5972,
1362,
198,
6738,
18931,
13,
4993,
8116,
1330,
18481,
803,
8979,
25060,
198,
198,
6738,
48190,
13,
11321,
13,
11250,
62,
22866,
62,
11321,
1330,
17056,
21947,
17227,
628,
... | 2.328537 | 417 |
# IMPORTING LIBRARIES
import numpy as np
import cv2
import math
#LOADING HAND CASCADE
hand_cascade = cv2.CascadeClassifier('Hand_haar_cascade.xml')
# VIDEO CAPTURE
cap = cv2.VideoCapture(0)
while True:
ret, img = cap.read()
blur = cv2.GaussianBlur(img,(5,5),0) # BLURRING IMAGE TO SMOOTHEN EDGES
gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY) # BGR -> GRAY CONVERSION
retval2,thresh1 = cv2.threshold(gray,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU) # THRESHOLDING IMAGE
hand = hand_cascade.detectMultiScale(thresh1, 1.3, 5) # DETECTING HAND IN THE THRESHOLDE IMAGE
mask = np.zeros(thresh1.shape, dtype = "uint8") # CREATING MASK
for (x,y,w,h) in hand: # MARKING THE DETECTED ROI
cv2.rectangle(img,(x,y),(x+w,y+h), (122,122,0), 2)
cv2.rectangle(mask, (x,y),(x+w,y+h),255,-1)
img2 = cv2.bitwise_and(thresh1, mask)
final = cv2.GaussianBlur(img2,(7,7),0)
contours, hierarchy = cv2.findContours(final, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(img, contours, 0, (255,255,0), 3)
cv2.drawContours(final, contours, 0, (255,255,0), 3)
if len(contours) > 0:
cnt=contours[0]
hull = cv2.convexHull(cnt, returnPoints=False)
# finding convexity defects
defects = cv2.convexityDefects(cnt, hull)
count_defects = 0
# applying Cosine Rule to find angle for all defects (between fingers)
# with angle > 90 degrees and ignore defect
if defects!= None:
for i in range(defects.shape[0]):
p,q,r,s = defects[i,0]
finger1 = tuple(cnt[p][0])
finger2 = tuple(cnt[q][0])
dip = tuple(cnt[r][0])
# find length of all sides of triangle
a = math.sqrt((finger2[0] - finger1[0])**2 + (finger2[1] - finger1[1])**2)
b = math.sqrt((dip[0] - finger1[0])**2 + (dip[1] - finger1[1])**2)
c = math.sqrt((finger2[0] - dip[0])**2 + (finger2[1] - dip[1])**2)
# apply cosine rule here
angle = math.acos((b**2 + c**2 - a**2)/(2*b*c)) * 57.29
# ignore angles > 90 and highlight rest with red dots
if angle <= 90:
count_defects += 1
#cv2.putText(img,"THIS IS 1", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
# define actions required
if count_defects == 1:
cv2.putText(img,"THIS IS 2", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 2:
cv2.putText(img, "THIS IS 3", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 3:
cv2.putText(img,"This is 4", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
elif count_defects == 4:
cv2.putText(img,"THIS IS 5", (50, 50), cv2.FONT_HERSHEY_SIMPLEX, 2, 2)
cv2.imshow('img',thresh1)
cv2.imshow('img1',img)
cv2.imshow('img2',img2)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
| [
2,
30023,
9863,
2751,
45651,
49,
1503,
11015,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
10688,
198,
198,
2,
35613,
2751,
367,
6981,
35106,
34,
19266,
198,
4993,
62,
66,
28966,
796,
269,
85,
17,
13,
34,... | 2.071373 | 1,289 |
# nyokaUtilities.py
import numpy as np
from random import choice
from string import ascii_uppercase
import copy,json
import ast,pathlib
import traceback
from nyoka import PMML43Ext as ny
global MEMORY_DICT_ARCHITECTURE,MEMORY_OF_LAYERS
settingFilePath='./settingFiles/'
savedModels='./SavedModels/'
MEMORY_OF_LAYERS={}
from trainModel.mergeTrainingV2 import PMMLMODELSTORAGE
layerDetail=open(settingFilePath+'listOflayers.json','r')
MEMORY_OF_LAYERS=json.loads(layerDetail.read())
#########################################All functions is to write PMML###############################
###################Below script is to get detaisl from a PMML file########################
#####################Add Update layer Utility Functions
| [
2,
299,
88,
17411,
18274,
2410,
13,
9078,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4738,
1330,
3572,
198,
6738,
4731,
1330,
355,
979,
72,
62,
7211,
2798,
589,
198,
11748,
4866,
11,
17752,
198,
11748,
6468,
11,
6978,
8019,
198,
... | 3.046875 | 256 |
from djangobench.utils import run_benchmark
from query_update.models import Book
run_benchmark(
benchmark,
meta = {
'description': 'A simple QuerySet.update().',
}
)
| [
6738,
42625,
648,
672,
24421,
13,
26791,
1330,
1057,
62,
26968,
4102,
198,
6738,
12405,
62,
19119,
13,
27530,
1330,
4897,
198,
198,
5143,
62,
26968,
4102,
7,
198,
220,
220,
220,
18335,
11,
198,
220,
220,
220,
13634,
796,
1391,
198,
... | 2.710145 | 69 |
import sys
import io
import re
import os
from pathlib import Path
from contextlib import contextmanager
from invoke import task
import requests
"""Tasks for cellpy development.
You need to have invoke installed in your
python environment for this to work.
Examples:
# build and upload to pypi:
> invoke build --upload
# build only the docs
> invoke build --docs
# clean up
> invoke clean
# clean up and build
> invoke clean build
"""
def get_platform():
"""get the platform you are running on"""
platforms = {
"linux1": "Linux",
"linux2": "Linux",
"darwin": "OS X",
"win32": "Windows",
"win64": "Windows",
}
if sys.platform not in platforms:
return sys.platform
return platforms[sys.platform]
@contextmanager
def capture():
"""context manager to capture output from a running subproject"""
o_stream = io.StringIO()
yield o_stream
print(o_stream.getvalue())
o_stream.close()
def get_pypi_info(package="cellpy"):
"""get version number and sha256 for a pypi package
Args:
package (str): name of package
Returns:
[version, sha256]
"""
url = f"https://pypi.org/pypi/{package}/json"
response = requests.get(url)
if not response:
print(f"url {url} not responding")
return None, None
response = response.json()
version = response["info"]["version"]
release = response["releases"][version][-1]
sha256 = release["digests"]["sha256"]
return version, sha256
@task
def pypi(c, package="cellpy"):
"""Query pypi"""
version, sha = get_pypi_info(package=package)
if version:
print(f"version: {version}")
print(f"sha256: {sha}")
@task
def commit(c, push=True, comment="automatic commit"):
"""Simply commit and push"""
cos = get_platform()
print(" Running commit task ".center(80, "="))
print(f"Running on platform: {cos}")
print(" status ".center(80, "-"))
with capture() as o:
c.run("git status", out_stream=o)
status_lines = o.getvalue()
# it seems it is also possible to do
# out = c.run(command)
# status_lines = out.stdout
new_files_regex = re.compile(r"modified:[\s]+([\S]+)")
new_files = new_files_regex.search(status_lines)
if new_files:
print(new_files.groups())
print(" staging ".center(80, "-"))
c.run("git add .")
print(" committing ".center(80, "-"))
c.run(f'git commit . -m "{comment}"')
if push:
print(" pushing ".center(80, "-"))
c.run("git push")
print(" finished ".center(80, "-"))
@task
def clean(c, docs=False, bytecode=False, extra=""):
"""Clean up stuff from previous builds"""
print(" Cleaning ".center(80, "="))
patterns = ["dist", "build", "cellpy.egg-info"]
if docs:
print(" - cleaning doc builds")
patterns.append("docs/_build")
if bytecode:
print(" - cleaning bytecode (i.e. pyc-files)")
patterns.append("**/*.pyc")
if extra:
print(f" - cleaning {extra}")
patterns.append(extra)
for pattern in patterns:
print(".", end="")
c.run("rm -rf {}".format(pattern))
print()
print(f"Cleaned {patterns}")
@task
def info(c, full=False):
"""Get info about your cellpy"""
import cellpy
from pathlib import Path
print()
version_file_path = Path("cellpy") / "_version.py"
version_ns = {}
with open(version_file_path) as f:
exec(f.read(), {}, version_ns)
version, sha = get_pypi_info(package="cellpy")
print(" INFO ".center(80, "="))
print(" version ".center(80, "-"))
print(f"version (by import cellpy): cellpy {cellpy.__version__}")
print(f"version (in _version.py): cellpy {version_ns['__version__']}")
if version:
print(f"version on PyPI: cellpy {version}")
@task
@task
@task
@task
def test(c):
"""Run tests with coverage"""
c.run("pytest --cov=cellpy tests/")
@task
def build(c, docs=False, upload=True):
"""Create distribution (and optionally upload to PyPI)"""
print(" Creating distribution ".center(80, "="))
print("Running python setup.py sdist")
c.run("python setup.py sdist")
if docs:
print(" Building docs ".center(80, "-"))
c.run("sphinx-build docs docs/_build")
if upload:
print(" Uploading to PyPI ".center(80, "="))
print(" Running 'twine upload dist/*'")
print(" Trying with using username and password from keyring.")
c.run("twine upload dist/*")
else:
print(" To upload to pypi: 'twine upload dist/*'")
@task
def conda_build(c, upload=False):
"""Create conda distribution"""
recipe_path = Path("./recipe/meta.yaml")
print(" Creating conda distribution ".center(80, "="))
if not recipe_path.is_file():
print(f"conda recipe not found ({str(recipe_path.resolve())})")
return
version, sha = get_pypi_info(package="cellpy")
update_dict = {"name": "cellpy", "version": version, "sha": sha}
print("Updating meta.yml")
update_meta_yaml(recipe_path, update_dict)
print("Running conda build")
print(update_dict)
with capture() as o:
c.run("conda build recipe", out_stream=o)
status_lines = o.getvalue()
new_files_regex = re.compile(r"TEST END: (.+)")
new_files = new_files_regex.search(status_lines)
path = new_files.group(1)
if upload:
upload_cmd = f"anaconda upload {path}"
c.run(upload_cmd)
else:
print(f"\nTo upload: anaconda upload {path}")
print("\nTo convert to different OS-es: conda convert --platform all PATH")
print("e.g.")
print("cd builds")
print(
r"conda convert --platform all "
r"C:\miniconda\envs\cellpy_dev\conda-bld\win-"
r"64\cellpy-0.3.0.post1-py37_0.tar.bz2"
)
@task
def help(c):
"""Print some help"""
print(" available invoke tasks ".center(80, "-"))
c.run("invoke -l")
print()
print(" info from dev_testutils.py ".center(80, "-"))
dev_help_file_path = Path("dev_utils/helpers") / "dev_testutils.py"
with open(dev_help_file_path) as f:
while True:
line = f.readline()
parts = line.split()
if parts:
if parts[0].isupper():
print(line.strip())
if not line:
break
print(" bye ".center(80, "-"))
| [
11748,
25064,
198,
11748,
33245,
198,
11748,
302,
198,
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
4732,
8019,
1330,
4732,
37153,
198,
198,
6738,
26342,
1330,
4876,
198,
11748,
7007,
628,
198,
37811,
51,
6791,
329,
2685,
... | 2.395298 | 2,722 |
# Discord
import discord
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from .utils import checks
from __main__ import send_cmd_help
# Others
import os
from copy import deepcopy, copy
import datetime
import time
import math
import random
try:
import validators
validatorsAvail = True
except:
validatorsAvail = False
class DiscordRPG:
"""The Discord RPG. I mean, *Thee Discord RPG*"""
@commands.group(name='rpgset', pass_context=True)
async def rpgset(self, ctx):
"""Settings for the RPG on this server"""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@rpgset.command(pass_context=True)
async def townname(self, ctx, *, name):
"""Allows you to set a name for this server's Home Town"""
author = ctx.message.author
sid = ctx.message.server
await self.town.set_town_name(ctx, name)
@rpgset.command(pass_context=True)
async def townavatar(self, ctx, *, avatarurl):
"""Allows you to set a new Avatar picture for this server's Home Town"""
# TODOLATER allow attachment grabbing. its possible, but im lazy
author = ctx.message.author
sid = ctx.message.server.id
if validators.url(avatarurl):
await self.town.set_town_avatar(sid, avatarurl)
else:
await self.bot.say("Not a valid URL. Try again.")
@commands.group(name="rpg", pass_context=True)
async def rpg(self, ctx):
"""General RPG stuff."""
if ctx.invoked_subcommand is None:
await send_cmd_help(ctx)
@rpg.command(pass_context=True)
@checks.admin_or_permissions(manage_server=True)
async def signup(self, ctx):
"""Allows an admin or moderator to signup this server into the RPG"""
await self.town.create_town(ctx)
@rpg.command(pass_context=True)
async def character(self, ctx):
"""Character options menu"""
author = ctx.message.author
sid = ctx.message.server
current_player = {}
player_exists = await self.player.check_player(author.id)
if player_exists:
current_player = await self.player.get_player_records(author.id)
else:
await self.bot.say("You have not yet joined the RPG. Please register using `{}rpg register`".format(ctx.prefix))
return
embed = discord.Embed(title="Options for {}".format(current_player[
'CharName']), description="Use the numbers to make a choice.", colour=0xfd0000)
embed.add_field(
name='Options', value="`1.` Get Character Sheet\n`2.` Change Avatar\n`3.` Change Bio\n`4.` View Home Town", inline=False)
embed.set_author(name='{}'.format(author.name),
icon_url='{}'.format(author.avatar_url))
embed.set_thumbnail(
url='https://i.ytimg.com/vi/Pq824AM9ZHQ/maxresdefault.jpg')
await self.bot.say("", embed=embed)
response = await self.bot.wait_for_message(author=author)
if '1' in response.content:
# Return the character sheet
await self.player.getCharacterSheet(author)
elif '2' in response.content:
# Grab url, validate it and save to the players profile in
# players.json
await self.bot.say("Please provide me with a url only, to use as an image for your character sheet.")
# TODOLATER allow attachment grabbing. its possible, but im lazy
avatarurl = await self.bot.wait_for_message(author=author)
if validators.url(avatarurl.content):
await self.player.setProfileAvatar(author.id, avatarurl.content)
else:
await self.bot.say("Not a valid URL. Try again.")
elif '3' in response.content:
await self.player.setBio(ctx, author.id)
elif '4' in response.content:
self.town.savetowns()
await self.town.reload_town_records()
await self.town.get_town_sheet(current_player['HomeTownID'])
else:
await self.bot.say("Invalid response. Please try again.")
@rpg.command(pass_context=True, no_pm=False)
async def register(self, ctx):
"""Registers and Creates your RPG Character."""
author = ctx.message.author
sid = ctx.message.server.id
townExists = await self.town.check_town(sid)
if not townExists:
await self.bot.say("Oops. Your town is still in piles of rubble. Please ask an admin or moderator of this channel to get your town started with `{}rpg signup`".format(ctx.prefix))
return
player_exists = await self.player.check_player(author.id)
if player_exists:
await self.bot.say("You are already regsitered. You can use `{}rpg character` to do things.".format(ctx.prefix))
return
await self.bot.say("Thanks for joining {}! We are going to need some information...".format(author.mention))
await self.player._createplayer(ctx)
@rpg.command(pass_context=True, no_pm=False)
async def viewtown(self, ctx):
"""View the details of the guild's town you are currently in"""
sid = ctx.message.server.id
await self.town.get_town_sheet(sid)
@rpg.command(pass_context=True, no_pm=False)
async def viewmonster(self, ctx, *, monsterID):
"""Testing Stub. Please do not use."""
await self.monster.getMonsterSheet(monsterID)
@rpg.command(pass_context=True, no_pm=False)
async def viewtile(self, ctx, locX: int, locY: int):
"""Testing sub. Please do not use."""
user = ctx.message.author
location = {"X" : locX, "Y" : locY}
current_player = await self.player.get_player_records(user.id)
tile = await self.map.map_provider(user, location)
await self.bot.say(tile)
@rpg.command(pass_context=True, no_pm=False)
async def findtile(self, ctx, tile_type):
"""Stub. Do not Use"""
user = ctx.message.author
tile = await self.map.find_tile(tile_type)
await self.bot.say(tile)
@rpg.command(pass_context = True, no_pm = False)
@rpg.command(pass_context=True, no_pm=False)
async def viewplayer(self, ctx, user: discord.Member):
"""Allows you to see the character sheet of another player in the game."""
hasProfile = await self.player.check_player(user.id)
if hasProfile:
await self.player.getCharacterSheet(user)
else:
await self.bot.say("That player does not yet exist. Perhaps consider asking them to join using `{}rpg register`?".format(ctx.prefix))
@rpg.command(pass_context=True, no_pm=False)
async def logout(self, ctx):
"""Logs you out of your current session, if there is one"""
await self._logout(ctx)
@rpg.command(pass_context=True, no_pm=False)
async def play(self, ctx):
"""Runs a session of DiscordRPG"""
# from this point onwards, CTX cannot be used for resources like server ID's.
# Needs to be pulled from the existing resources, in the dicts.
userDO = ctx.message.author
await self.reload_town_records()
current_player = await self.player.get_player_records(userDO.id)
if current_player is None:
await self.bot.say("It seems you have never registered with the rpg."
" Please try `{}rpg register`".format(ctx.prefix))
return
player_town = await self.town.get_town_records(current_player['HomeTownID'])
if player_town is None:
await self.bot.say("Hmmm... It appears your town is still in Rubble unfortunately."
"Torn down by a war long since forgotten. "
"Get an admin of this server to try `{}rpg signup`".format(ctx.prefix))
return
if not self._login(userDO):
await self.bot.say("You already have an Ongoing play session. "
"If you can't find it, please try "
"`{}rpg logout`".format(ctx.prefix))
return
# TODO remove when complete.
await self.bot.say("This is still under construction. Any bugs are please to be reported using `;;contact` followed by the error given. Thanks for testing out DiscordRPG!")
if 'Never' in current_player['Last_Played']:
await self.bot.say("Thank you for signing up. Welcome to your next grand adventure")
await self.first_adventure_town(ctx, current_player, player_town)
| [
2,
39462,
198,
11748,
36446,
198,
6738,
36446,
13,
2302,
1330,
9729,
198,
6738,
269,
18463,
13,
26791,
13,
7890,
9399,
1330,
1366,
9399,
198,
6738,
764,
26791,
1330,
8794,
198,
6738,
11593,
12417,
834,
1330,
3758,
62,
28758,
62,
16794,
... | 2.395477 | 3,626 |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# __file__: edge
# 图像边缘检测
import numpy as np
import cv2
if __name__ == '__main__':
path = './res/river_color_bg.png'
edge(pic_path=path)
| [
2,
0,
1220,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
11593,
7753,
834,
25,
5743,
198,
2,
10263,
249,
122,
161,
225,
237,
164,
122,
117,
163,
120,
246,
162,
96,
2... | 1.875 | 104 |
#!/usr/bin/env python
"""
computes cost of storing camera data
"""
from __future__ import division
# %%
print("quantities are for full data rate")
Zyla = Cam(2560 * 2160, 100, 4, 1500)
print("\n--------------------------")
print("Zyla")
print(f"MB/sec: {Zyla.bytesec/1e6:.1f} GB/hour: {Zyla.bytehour/1e9:.0f}")
print(f"SSD: ${Zyla.HDDcosthour:.2f}/hour")
print(f"{Zyla.hddTB} TB SSD fills in {Zyla.hourstorage:.2f} hours")
NeoDMC = Cam(2560 / 4 * 2160 / 4, 30, 8, cost=220)
print("\n--------------------------")
print("Neo Marshall DMC (4x4 full frame binning)")
print(f"MB/sec: {NeoDMC.bytesec/1e6:.1f} GB/hour: {NeoDMC.bytehour/1e9:.0f}")
print(f"SSD: ${NeoDMC.HDDcosthour:.2f}/hour")
print(f"{NeoDMC.hddTB} TB HDD fills in {NeoDMC.hourstorage:.2f} hours")
U897 = Cam(512 * 512, 56, 8, 220)
print("\n--------------------------")
print("Ultra 897")
print("MB/sec: {:.1f} GB/hour: {:.0f}".format(U897.bytesec / 1e6, U897.bytehour / 1e9))
print("HDD: ${:.2f}/hour".format(U897.HDDcosthour))
print("{} TB HDD fills in {:.1f} hours".format(U897.hddTB, U897.hourstorage))
U888 = Cam(1024 * 1024, 26, 8, 220)
print("\n--------------------------")
print("Ultra 888")
print("MB/sec: {:.1f} GB/hour: {:.0f}".format(U888.bytesec / 1e6, U888.bytehour / 1e9))
print("HDD: ${:.2f}/hour".format(U888.HDDcosthour))
print("{} TB HDD fills in {:.1f} hours".format(U888.hddTB, U888.hourstorage))
# %%
print(
"{} month season {} % retained: {:.1f} TB".format(
U888.monthsseason, U888.goodfrac * 100, U888.TBseason
)
)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
5589,
1769,
1575,
286,
23069,
4676,
1366,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
7297,
628,
198,
198,
2,
43313,
198,
4798,
7203,
40972,
871,
389,
329,
1336,
1366,
... | 2.235808 | 687 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
11,
15720,
602,
628
] | 2.891892 | 37 |
import alsaaudio
import sys
m = alsaaudio.Mixer()
vol = m.getvolume()
if len(sys.argv) == 1 or sys.argv[1] == "g":
print "Current volume is " + str(getVolume()) + " percent"
if len(sys.argv)>=3:
if sys.argv[1] == "i":
try:
i = int(sys.argv[2])
increase(i)
except ValueError:
print "Please specify a number after saying increase volume by"
elif sys.argv[1] == "d":
try:
d = int(sys.argv[2])
decrease(d)
except:
print "Please specify a number after saying decrease volume by"
elif sys.argv[1] == "s":
try:
s = int(sys.argv[2])
setVolume(s)
except:
print "Please specify a number after saying set volume to" | [
11748,
435,
11400,
24051,
201,
198,
11748,
25064,
201,
198,
201,
198,
76,
796,
435,
11400,
24051,
13,
35608,
263,
3419,
201,
198,
10396,
796,
285,
13,
1136,
29048,
3419,
201,
198,
201,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
6624... | 2.196774 | 310 |
#!/usr/bin/env python
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Main file for running VisTrails in server mode"""
import os
import sys
if __name__ == '__main__':
fix_paths()
# Set locale to English
import locale
locale.setlocale(locale.LC_ALL, 'C')
import vistrails.gui.requirements
vistrails.gui.requirements.require_pyqt4_api2()
import vistrails.gui.application_server
try:
optionsDict = {
'interactiveMode': False,
'enablePackagesSilently': False,
'handlerDontAsk': True,
}
v = vistrails.gui.application_server.start_server(optionsDict,
args=sys.argv[1:])
app = vistrails.gui.application_server.VistrailsServer()
except SystemExit, e:
print str(e)
sys.exit(e)
except Exception, e:
import traceback
print "Uncaught exception on initialization: %s" % (
traceback._format_final_exc_line(type(e).__name__, e))
traceback.print_exc()
sys.exit(255)
v = app.run_server()
vistrails.gui.application_server.stop_server()
sys.exit(v)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
29113,
29113,
7804,
4242,
21017,
198,
2235,
198,
2235,
15069,
357,
34,
8,
1946,
12,
5304,
11,
968,
1971,
2059,
13,
198,
2235,
15069,
357,
34,
8,
2813,
12,
4967,
11,
48166,
12,
34220,... | 2.922414 | 1,044 |
from channels.routing import ProtocolTypeRouter, URLRouter
from channels.auth import AuthMiddlewareStack
from django.urls import re_path
from . import consumers
websocket_urlpatterns = [
re_path(r'ws/game/(?P<room_name>\w+)/$', consumers.ChatConsumer),
]
application = ProtocolTypeRouter({
# Empty for now (http->django views is added by default)
'websocket': AuthMiddlewareStack(
URLRouter(
websocket_urlpatterns
)
),
})
| [
6738,
9619,
13,
81,
13660,
1330,
20497,
6030,
49,
39605,
11,
10289,
49,
39605,
198,
6738,
9619,
13,
18439,
1330,
26828,
34621,
1574,
25896,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
302,
62,
6978,
198,
6738,
764,
1330,
7008,
198,
1... | 2.653631 | 179 |
import unittest
from nltk_api.tag.sentence import tag_sentences
| [
11748,
555,
715,
395,
198,
198,
6738,
299,
2528,
74,
62,
15042,
13,
12985,
13,
34086,
594,
1330,
7621,
62,
34086,
3007,
628
] | 2.869565 | 23 |
#!/usr/bin/python
"""
Purpose:
"""
from django.core.management.base import BaseCommand
import os
import csv
from IPLcricket import settings
from matches.models import MatchesPlayed, Deliveries
from datetime import datetime
from django.db.utils import DataError
import pandas as pd
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
37811,
198,
30026,
3455,
25,
198,
37811,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
6738,
6101,
43,
66,
5557,
3... | 3.380952 | 84 |
# Copyright 2013 - Noorul Islam K M
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from solumclient.common import base as solum_base
from solumclient.openstack.common.apiclient import base as apiclient_base
from solumclient.openstack.common.apiclient import client
from solumclient.openstack.common.apiclient import fake_client
from solumclient.tests import base as test_base
fixture1 = {
'/foo_resource': {
'GET': (
{},
{'id': 1, 'name': 'foo'}
),
}
}
fixture2 = {
'/foo_resource': {
'GET': (
{},
{'foo_resource': {'id': 1, 'name': 'foo'}}
),
}
}
fixture3 = {
'/foo_resources': {
'GET': (
{},
[
{'id': 1, 'name': 'foo'},
{'id': 2, 'name': 'bar'}
]
),
}
}
fixture4 = {
'/foo_resources': {
'GET': (
{},
{'foo_resources': [
{'id': 1, 'name': 'foo'},
{'id': 2, 'name': 'bar'}
]}
),
}
}
| [
2,
15069,
2211,
532,
1400,
273,
377,
3449,
509,
337,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13... | 2.212377 | 711 |
import torch


def BoxRelationalEmbedding(f_g, dim_g=64, wave_len=1000, trignometric_embedding=True):
    """
    Given a tensor with bbox coordinates for detected objects on each batch image,
    this function computes a matrix for each image
    with entry (i,j) given by a vector representation of the
    displacement between the coordinates of bbox_i, and bbox_j

    input: tensor of shape=(batch_size, max_nr_bounding_boxes, 4) holding
           (x_min, y_min, x_max, y_max) per box
    output: tensor of shape=(batch_size, max_nr_bounding_boxes,
            max_nr_bounding_boxes, dim_g) when trignometric_embedding is True,
            otherwise (..., 4) with the raw (dx, dy, dw, dh) geometry features
    """
    # returns a relational embedding for each pair of bboxes, with dimension = dim_g
    # follow implementation of https://github.com/heefe92/Relation_Networks-pytorch/blob/master/model.py#L1014-L1055
    batch_size = f_g.size(0)

    x_min, y_min, x_max, y_max = torch.chunk(f_g, 4, dim=-1)

    # Box centres and (inclusive, pixel-style) width/height.
    cx = (x_min + x_max) * 0.5
    cy = (y_min + y_max) * 0.5
    w = (x_max - x_min) + 1.
    h = (y_max - y_min) + 1.

    # cx.view(B,1,-1) transposes the (B,N,1) column so broadcasting produces
    # the full (B, N, N) pairwise displacement matrix.
    delta_x = cx - cx.view(batch_size, 1, -1)
    # clamp keeps log() finite when two boxes share a centre coordinate
    delta_x = torch.clamp(torch.abs(delta_x / w), min=1e-3)
    delta_x = torch.log(delta_x)

    delta_y = cy - cy.view(batch_size, 1, -1)
    delta_y = torch.clamp(torch.abs(delta_y / h), min=1e-3)
    delta_y = torch.log(delta_y)

    # Log size ratios between every pair of boxes.
    delta_w = torch.log(w / w.view(batch_size, 1, -1))
    delta_h = torch.log(h / h.view(batch_size, 1, -1))

    matrix_size = delta_h.size()
    delta_x = delta_x.view(batch_size, matrix_size[1], matrix_size[2], 1)
    delta_y = delta_y.view(batch_size, matrix_size[1], matrix_size[2], 1)
    delta_w = delta_w.view(batch_size, matrix_size[1], matrix_size[2], 1)
    delta_h = delta_h.view(batch_size, matrix_size[1], matrix_size[2], 1)

    position_mat = torch.cat((delta_x, delta_y, delta_w, delta_h), -1)  # bs * r * r * 4

    if trignometric_embedding:
        # Transformer-style sinusoidal encoding of the 4 geometry features.
        # BUGFIX: allocate on the input's device instead of unconditionally
        # calling .cuda(), which crashed on CPU-only machines.
        feat_range = torch.arange(dim_g / 8, device=f_g.device)
        dim_mat = feat_range / (dim_g / 8)
        dim_mat = 1. / (torch.pow(wave_len, dim_mat))

        dim_mat = dim_mat.view(1, 1, 1, -1)
        position_mat = position_mat.view(batch_size, matrix_size[1], matrix_size[2], 4, -1)
        position_mat = 100. * position_mat

        # (B,N,N,4,1) * (1,1,1,1,dim_g/8) -> (B,N,N,4,dim_g/8), flattened to dim_g/2
        mul_mat = position_mat * dim_mat
        mul_mat = mul_mat.view(batch_size, matrix_size[1], matrix_size[2], -1)
        sin_mat = torch.sin(mul_mat)
        cos_mat = torch.cos(mul_mat)
        # sin/cat halves concatenate to the final dim_g-dimensional embedding
        embedding = torch.cat((sin_mat, cos_mat), -1)
    else:
        embedding = position_mat
    return embedding
11748,
28034,
628,
198,
4299,
8315,
6892,
864,
31567,
6048,
278,
7,
69,
62,
70,
11,
5391,
62,
70,
28,
2414,
11,
6769,
62,
11925,
28,
12825,
11,
491,
570,
16996,
62,
20521,
12083,
28,
17821,
2599,
198,
220,
220,
220,
37227,
198,
22... | 2.252669 | 1,124 |
import yaml
import logging
import logging.config
from utils.executable import get_destination
| [
11748,
331,
43695,
198,
11748,
18931,
198,
11748,
18931,
13,
11250,
198,
198,
6738,
3384,
4487,
13,
18558,
18187,
1330,
651,
62,
16520,
1883,
628
] | 3.84 | 25 |
__version__ = '1.7.1.2'
| [
834,
9641,
834,
796,
705,
16,
13,
22,
13,
16,
13,
17,
6,
198
] | 1.714286 | 14 |
#!/usr/bin/env python3
# Plot somatic/dendritic spike activity heat maps from CSV dumps:
# one overview figure (spike.pdf) plus per-window detail figures
# (spike_partN.pdf). Expects spike_som.csv, spike_dnd.csv and
# inhinput_dnd.csv in the working directory, first column = time [s].
import sys
import numpy
#import matplotlib
#matplotlib.use("SVG")
import pylab
# ---- global matplotlib styling (small fonts, no tick marks) ----
pylab.rcParams["font.size"]=8
pylab.rcParams["legend.fontsize"]=8
#pylab.rcParams["lines.linewidth"]=1
#pylab.rcParams["axes.linewidth"]=2
#pylab.rcParams["axes.labelsize"]="large"
#pylab.rcParams["axes.labelweight"]="bold"
pylab.rcParams["xtick.major.size"]=0
pylab.rcParams["xtick.minor.size"]=0
pylab.rcParams["ytick.major.size"]=0
pylab.rcParams["ytick.minor.size"]=0
#pylab.rcParams["xtick.direction"]="out"
#pylab.rcParams["ytick.direction"]="out"
#pylab.rcParams["figure.figsize"]=(3, 3)

#activity
colormap="hot" #"jet", "bwr"
# rows = time points; column 0 = time stamp, remaining columns = neurons
spikeEsom=numpy.loadtxt("spike_som.csv", delimiter=",")
spikeEdnd=numpy.loadtxt("spike_dnd.csv", delimiter=",")
inhinput=numpy.loadtxt("inhinput_dnd.csv", delimiter=",")
# fixed colour ranges so the overview and part figures are comparable
som_max=0.16
som_min=0.0
dnd_max=1.0
dnd_min=0.0
time=spikeEsom[:,0]  # NOTE(review): assigned but never used below

# ---- overview figure: somatic (top) and dendritic (bottom) activity ----
pylab.clf()
pylab.figure(figsize=(6,3))
pylab.subplot2grid([4,1],[0,0], rowspan=2)
pylab.imshow(spikeEsom[:, 1:].T, aspect="auto", interpolation="none", cmap=colormap, extent=[spikeEsom[0,0], spikeEsom[-1,0], len(spikeEsom[0,1:]), 1], vmax=som_max, vmin=som_min)
#limit=numpy.max(numpy.abs(xE[:,1:]))
#pylab.clim([-limit, limit])
#pylab.colorbar()
pylab.xticks([])
#pylab.xlabel("time [s]")
pylab.ylabel("neuron #")
pylab.subplot2grid([4,1],[2,0], rowspan=2)
pylab.imshow(spikeEdnd[:, 1:].T, aspect="auto", interpolation="none", cmap=colormap, extent=[spikeEdnd[0,0], spikeEdnd[-1,0], len(spikeEdnd[0,1:]), 1], vmax=dnd_max, vmin=dnd_min)
#limit=numpy.max(numpy.abs(xE[:,1:]))
#pylab.clim([-limit, limit])
#pylab.colorbar()
pylab.xlabel("time [s]")
pylab.ylabel("neuron #")
pylab.tight_layout()
pylab.savefig("spike.pdf")

# ---- detail figures: split the run into windows of part_len samples ----
#part
part_len=10*100  # samples per window (presumably 10 s at 100 Hz -- confirm)
part_num=int(len(spikeEsom[:,0])//part_len)
for i in range(part_num):
    pylab.clf()
    pylab.figure(figsize=(3.5,3))
    # row 1: somatic firing rate
    pylab.subplot(3,1,1)
    pylab.imshow(spikeEsom[i*part_len:(i+1)*part_len, 1:].T, aspect="auto", interpolation="none", cmap=colormap, extent=[spikeEsom[i*part_len,0], spikeEsom[(i+1)*part_len-1,0], len(spikeEsom[0,1:]), 1], vmax=som_max, vmin=som_min)
    pylab.colorbar()
    pylab.xticks([])
    pylab.yticks([1, len(spikeEsom[0,1:])])
    #pylab.xlabel("time [s]")
    pylab.ylabel("CA3 firing rate")
    # row 2: dendritic activity
    pylab.subplot(3,1,2)
    pylab.imshow(spikeEdnd[i*part_len:(i+1)*part_len, 1:].T, aspect="auto", interpolation="none", cmap=colormap, extent=[spikeEdnd[i*part_len,0], spikeEdnd[(i+1)*part_len-1,0], len(spikeEdnd[0,1:]), 1], vmax=dnd_max, vmin=dnd_min)
    pylab.colorbar()
    pylab.xticks([])
    pylab.yticks([1, len(spikeEsom[0,1:])])
    #pylab.xlabel("time [s]")
    pylab.ylabel("dendritic\nactivity")
    # row 3: inhibitory input onto the dendrites
    pylab.subplot(3,1,3)
    pylab.imshow(inhinput[i*part_len:(i+1)*part_len, 1:].T, aspect="auto", interpolation="none", cmap=colormap, extent=[inhinput[i*part_len,0], inhinput[(i+1)*part_len-1,0], len(inhinput[0,1:]), 1], vmin=0.0)
    pylab.colorbar()
    pylab.yticks([1, len(spikeEsom[0,1:])])
    pylab.xlabel("time [s]")
    pylab.ylabel("inhibitory inputs\nto dendrites")
    pylab.tight_layout()
    pylab.savefig("spike_part"+str(i)+".pdf")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
299,
32152,
198,
2,
11748,
2603,
29487,
8019,
198,
2,
6759,
29487,
8019,
13,
1904,
7203,
50,
43490,
4943,
198,
11748,
279,
2645,
397,
198,
198,
79,
... | 1.99173 | 1,572 |
import datetime

# Demo: datetime objects are immutable.
current = datetime.datetime.now()
print(current)
# replace() does NOT modify `current` in place -- it builds and returns a
# brand-new datetime with the given year/month/day swapped in. That is
# easy to get wrong because it looks like a mutating instance method.
print(current.replace(1999, 12, 31))
print(current)
| [
11748,
4818,
8079,
198,
2197,
796,
4818,
8079,
13,
19608,
8079,
13,
2197,
3419,
198,
4798,
7,
2197,
8,
198,
4798,
7,
2197,
13,
33491,
7,
18946,
11,
1105,
11,
3261,
4008,
198,
4798,
7,
2197,
8,
198,
2,
220,
11482,
6527,
8943,
23376... | 1.59596 | 99 |
# strassens_algorithm.py
# A demonstration of Strassen's subcubic runtime matrix multiplication
# algorithm on square matrices using the divide and conquer model.
import numpy as np
if __name__ == "__main__":
    # NOTE(review): main() is not defined anywhere in this excerpt --
    # presumably it lives in an elided part of the file; confirm it exists,
    # otherwise running this script raises NameError.
    main()
2,
965,
562,
641,
62,
282,
42289,
13,
9078,
201,
198,
201,
198,
2,
317,
13646,
286,
4285,
562,
268,
338,
850,
66,
549,
291,
19124,
17593,
48473,
220,
201,
198,
2,
11862,
319,
6616,
2603,
45977,
1262,
262,
14083,
290,
23875,
2746,
... | 3.121622 | 74 |
#!/usr/bin/python
""" webserver.py - Flask based web server to handle all legal requests.
Copyright (C) 2019 Basler AG
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import http
def set_led(red, green, blue):
    """ Set all four HuCon eye LEDs to the given RGB colour.

    Any failure (e.g. the hucon driver not being installed) is swallowed
    and printed so the web server keeps running on non-robot hosts.
    """
    try:
        from hucon import Eye
        for position in (1, 2, 3, 4):
            Eye(position, Eye.RGB).set_color(red, green, blue)
    except Exception as err:
        print(err)
# Set the led eyes to yellow at the beginning (boot indicator).
set_led(249, 166, 2)
import argparse
import logging
import threading
import time
# Python 2 fallback: httplib was renamed to http.client in Python 3.
try:
    import httplib
except:
    import http.client as httplib
from flask import Flask
from flask import request
from flask import render_template
from flask_socketio import SocketIO
from HuConJsonRpc import HuConJsonRpc
# Single JSON-RPC backend instance shared by all request handlers.
json_rpc = HuConJsonRpc()
COLLECT_STATIC_ROOT = "/opt/hucon/webserver/static"
COLLECT_STORAGE = 'flask_collect.storage.file'
app = Flask(json_rpc._SERVER_NAME)
app.config["SECRET_KEY"] = "SECRET_KEY"
socketio = SocketIO(app, logger=True)#, async_mode='eventlet'
@app.context_processor
def detect_browser_language():
    """ Inject the best-matching browser language into every template context.

    Matches the request's Accept-Language header against the supported
    languages and falls back to English when nothing matches.
    """
    supported_browser_languages = ["en", "de"]
    lang = request.accept_languages.best_match(supported_browser_languages)
    # best_match returns None when no supported language matches; compare
    # with `is None` (identity), not `== None`.
    if lang is None:
        lang = 'en'
    return dict(browser_language=lang)
@app.route('/')
@app.route('/index.html')
def index():
    """ Render the landing page (index.html).
    """
    return render_template('index.html')
@app.route('/blockly.html')
def blockly():
    """ Render the Blockly visual-programming page.
    """
    return render_template('blockly.html')
@app.route('/editor.html')
def editor():
    """ Render the Python code editor page.
    """
    return render_template('editor.html')
@app.route('/mobile.html')
def mobile():
    """ Render the mobile control page.
    """
    return render_template('mobile.html')
@app.route('/settings.html')
def settings():
    """ Render the settings page.
    """
    return render_template('settings.html')
@app.route('/remote_control.html')
def remote_control():
    """ Render the remote control page.
    """
    return render_template('remote_control.html')
@app.route('/API', methods=['GET', 'POST'])
def api():
    """ Handle JSON-RPC calls on POST; render the API documentation page on GET.
    """
    if request.method == 'POST':
        # force=True parses the body as JSON even without an
        # application/json content-type header.
        data = request.get_json(force=True)
        if not data:
            return ('Bad Request.', 400)
        # Delegate the actual command dispatch to the JSON-RPC backend.
        return json_rpc.handle_control(data)
    return render_template('api.html')
@app.before_first_request
def before_first_request():
    """ Set the eyes to green and after a while to off.

    This will give the user the ability to see that the service is running.
    """
    set_led(0, 255, 0)
    time.sleep(2)  # keep the green "alive" signal visible for two seconds
    set_led(0, 0, 0)
def check_service():
    """ Poll the local web server every 10 s until it answers with HTTP 200.

    Runs as a socketio background task; the loop exits once the Flask
    service is reachable, which then triggers the LED boot feedback.
    """
    not_started = True
    while not_started:
        time.sleep(10)
        conn = None
        try:
            conn = http.client.HTTPConnection('localhost', json_rpc._LISTENING_PORT, timeout=5)
            conn.request('GET', '/')
            res = conn.getresponse()
            if res.status == 200:
                not_started = False
        except Exception as ex:
            print(ex)
        finally:
            # BUGFIX: close the connection on every iteration; the old code
            # leaked one socket per failed poll attempt.
            if conn is not None:
                conn.close()
if __name__ == '__main__':
    """ Create the Server and listen on each incoming request.
    """
    # --debug enables verbose werkzeug logging; off by default.
    parser = argparse.ArgumentParser(description='Start the %s web server.' % json_rpc._SERVER_NAME)
    parser.add_argument('--debug',
                        dest='debug',
                        action='store_true',
                        help='Print more debug messages on the console during running.')
    args = parser.parse_args()
    if not args.debug:
        # Reduce the log messages.
        log = logging.getLogger('werkzeug')
        log.setLevel(logging.ERROR)
    # Run a thread to check the flask service.
    # thread = threading.Thread(target=check_service)
    # thread.start()
    # socketio background task replaces the raw thread above so it plays
    # nicely with the socketio event loop.
    socketio.start_background_task(target=check_service)
    socketio.run(app, host='0.0.0.0', port=json_rpc._LISTENING_PORT, debug=args.debug)
| [
2,
48443,
14629,
14,
8800,
14,
29412,
201,
198,
37811,
2639,
18497,
13,
9078,
532,
46947,
1912,
3992,
4382,
284,
5412,
477,
2742,
7007,
13,
201,
198,
201,
198,
220,
220,
220,
15069,
357,
34,
8,
13130,
6455,
1754,
13077,
201,
198,
22... | 2.420173 | 1,854 |
# Copyright 2019 Novartis Institutes for BioMedical Research Inc. Licensed
# under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0. Unless
# required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
import os
import logging
import socket
# Gateway configuration, read once at import time from environment variables.
cellxgene_location = os.environ.get("CELLXGENE_LOCATION")  # path to the cellxgene executable
cellxgene_data = os.environ.get("CELLXGENE_DATA")  # root directory of the datasets
gateway_port = int(os.environ.get("GATEWAY_PORT", "5005"))  # port the gateway listens on
# Externally visible host/protocol for building URLs; the GATEWAY_* names
# are kept as a backwards-compatible fallback.
external_host = os.environ.get("EXTERNAL_HOST", os.environ.get("GATEWAY_HOST", f"localhost:{gateway_port}"))
external_protocol = os.environ.get("EXTERNAL_PROTOCOL", os.environ.get("GATEWAY_PROTOCOL", "http"))
ip = os.environ.get("GATEWAY_IP")  # explicit bind IP, if any
extra_scripts = os.environ.get("GATEWAY_EXTRA_SCRIPTS")  # extra scripts to inject into pages
ttl = os.environ.get("GATEWAY_TTL")  # time-to-live setting -- presumably for spawned processes; confirm
# Feature flags: 'true' or '1' (case-insensitive) enables the feature.
enable_upload = os.environ.get("GATEWAY_ENABLE_UPLOAD", "").lower() in ['true', '1']
enable_annotations = os.environ.get("GATEWAY_ENABLE_ANNOTATIONS", "").lower() in ['true', '1']
# Grouped for later reporting/validation by the callers of this module.
env_vars = {
    "CELLXGENE_LOCATION": cellxgene_location,
    "CELLXGENE_DATA": cellxgene_data,
    "GATEWAY_IP": ip,
}
optional_env_vars = {
    "EXTERNAL_HOST": external_host,
    "EXTERNAL_PROTOCOL": external_protocol,
    "GATEWAY_PORT": gateway_port,
    "GATEWAY_EXTRA_SCRIPTS": extra_scripts,
    "GATEWAY_TTL": ttl,
    "GATEWAY_ENABLE_UPLOAD": enable_upload,
    "GATEWAY_ENABLE_ANNOTATIONS": enable_annotations,
}
| [
2,
15069,
13130,
5267,
433,
271,
33656,
329,
16024,
37158,
4992,
3457,
13,
49962,
198,
2,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
407,
779,
198,
2,
428,
2393,
2845,
287,
11846,
351,
26... | 2.741379 | 638 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: flat
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
| [
2,
6338,
7560,
416,
262,
21939,
36474,
364,
17050,
11,
466,
407,
13096,
198,
198,
2,
25745,
25,
6228,
198,
198,
11748,
6228,
36873,
364,
198,
6738,
6228,
36873,
364,
13,
5589,
265,
1330,
1330,
62,
77,
32152,
198,
37659,
796,
1330,
6... | 3.21875 | 64 |
#!/usr/bin/env python3
import aiohttp
import asyncio
import concurrent
import datetime
import jinja2
import logging
import os
# This is our native database module
import blabber_database
from aiohttp import web
logger = logging.getLogger(__name__)  # module-level logger
DEV = True  # development-mode flag -- confirm intended value before deploying
ROOT_DIRECTORY = os.path.dirname(os.path.realpath(__file__))  # directory containing this file
DATABASE_PATH = "./blabber.db" # File path of our database file
DATABASE_CACHE_SIZE = (10 * 2**20) // 4096; # Memory cache size (unit is blocks of 4 KiB)
# Called from html templates
# Run the database operations in a worker thread so we don't block other network I/O.
# The database does not support multithreading right now, so more than one worker would be useless.
# We only allow a maximum number of pending operations in case our database is too slow
# to handle the incoming requests (they would queue up without bounds otherwise).
# Returns path to the index page.
# Returns the path to the page for the post with the given id.
# Returns the path to the submit-post endpoint.
# Returns the path to the submit-comment endpoint.
if __name__ == "__main__":
    # NOTE(review): main() is not visible in this excerpt -- presumably it is
    # defined in an elided part of the file; confirm before running.
    main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
257,
952,
4023,
198,
11748,
30351,
952,
198,
11748,
24580,
198,
11748,
4818,
8079,
198,
11748,
474,
259,
6592,
17,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
2,
77... | 3.235294 | 357 |
"""
Script goal,
Test out the google earth engine to see what i can do
- find a landsat collection for a single point
"""
#==============================================================================
__title__ = "GEE Movie Maker"
__author__ = "Arden Burrell"
__version__ = "v1.0(04.04.2019)"
__email__ = "arden.burrell@gmail.com"
#==============================================================================
# +++++ Check the paths and set ex path to fireflies folder +++++
import os
import sys
if not os.getcwd().endswith("fireflies"):
if "fireflies" in os.getcwd():
p1, p2, _ = os.getcwd().partition("fireflies")
os.chdir(p1+p2)
else:
raise OSError(
"This script was called from an unknown path. CWD can not be set"
)
sys.path.append(os.getcwd())
#==============================================================================
# Import packages
import numpy as np
import pandas as pd
import geopandas as gpd
import argparse
import datetime as dt
import warnings as warn
import xarray as xr
import bottleneck as bn
import scipy as sp
import glob
import time
from collections import OrderedDict
from scipy import stats
from numba import jit
# Import the Earth Engine Python Package
import ee
import ee.mapclient
from ee import batch
from geetools import batch as gee_batch
# from netCDF4 import Dataset, num2date, date2num
# from scipy import stats
# import statsmodels.stats.multitest as smsM
# Import plotting and colorpackages
import matplotlib.pyplot as plt
import matplotlib.colors as mpc
import matplotlib as mpl
import palettable
import fiona
fiona.drvsupport.supported_drivers['kml'] = 'rw' # enable KML support which is disabled by default
fiona.drvsupport.supported_drivers['KML'] = 'rw' # enable KML support which is disabled by default
# import seaborn as sns
# import cartopy.crs as ccrs
# import cartopy.feature as cpf
# from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import geopy.distance as geodis
import myfunctions.corefunctions as cf
# # Import debugging packages
# import socket
# print(socket.gethostname())
import ipdb
print("numpy version : ", np.__version__)
print("pandas version : ", pd.__version__)
print("xarray version : ", xr.__version__)
#==============================================================================
#==============================================================================
def GEE_geotifexp(coords, spath, program, fails = None):
    """ Export a per-scene GeoTIFF time series for one study site via GEE.

    Builds a merged Landsat 5/7/8 surface-reflectance collection clipped to
    the site bounding box, then submits one Export.image.toDrive task per
    scene. Scene metadata is written to CSV alongside the grid info.

    Parameters:
        coords: site row/frame with name, lonr_min/max, latr_min/max
            (values may be scalars or length-1 series -- both are handled)
        spath: output folder root for the metadata CSVs
        program: currently only "LANDSAT" is implemented
        fails: optional list of scene indices to re-export (skip the rest)
    """
    # Build the site polygon; the except branch handles the case where the
    # coords fields are plain scalars rather than length-1 series.
    try:
        geom = ee.Geometry.Polygon([
            [coords.lonr_min[0],coords.latr_min[0]],
            [coords.lonr_max[0],coords.latr_min[0]],
            [coords.lonr_max[0],coords.latr_max[0]],
            [coords.lonr_min[0],coords.latr_max[0]]])
    except:
        geom = ee.Geometry.Polygon([
            [coords.lonr_min,coords.latr_min],
            [coords.lonr_max,coords.latr_min],
            [coords.lonr_max,coords.latr_max],
            [coords.lonr_min,coords.latr_max]])
    # ========== Rename the LS8 bands to match landsat archive ==========
    # ========== Define the image collection ==========
    # bandlist = ['B4','B3', 'B2', 'B1']
    # ========== setup and reverse the bandlists ==========
    bandlist = ['B', 'G', 'R', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa']
    bandlist.reverse()
    # program = "sentinal"
    if program == "LANDSAT":
        dschoice = "SR"#
        dsinfom = "LANDSAT_5_7_8"
        dsbands = "SNRGB"
        # dschoice = "TOA"
        ls8c = "LANDSAT/LC08/C01/T1_%s" % dschoice
        # Per-sensor collections, cloud-filtered to <15% cover.
        # NOTE(review): renamebandsETM/renamebands/LS7fix are defined
        # elsewhere in this file (not visible in this excerpt).
        L5coll = ee.ImageCollection(
            "LANDSAT/LT05/C01/T1_%s" % dschoice).filter(
            ee.Filter.lt('CLOUD_COVER',15)).map(
            renamebandsETM).filterBounds(geom).select(bandlist)
        L7coll = ee.ImageCollection(
            'LANDSAT/LE07/C01/T1_%s' % dschoice).filter(
            ee.Filter.lt('CLOUD_COVER',15)).map(
            renamebandsETM).filterBounds(geom).map(LS7fix).select(bandlist)
        L8coll = ee.ImageCollection(
            ls8c).filter(
            ee.Filter.lt('CLOUD_COVER', 15)).map(
            renamebands).filterBounds(geom).select(bandlist)
        # Merge all three sensors and sort chronologically.
        collection = ee.ImageCollection(L5coll.merge(L7coll.merge(L8coll))).sort('system:time_start', True)
    else:
        # Only LANDSAT is implemented; drop into the debugger then bail.
        ipdb.set_trace()
        sys.exit()
    # ========== Fetch the dates ==========
    info = []
    for elem in collection.getInfo()["features"]:
        utime = elem["properties"]['system:time_start']
        sat   = elem["properties"]["SATELLITE"]
        # Landsat 7 scenes carry their id under a different key.
        try:
            if sat =='LANDSAT_7':
                uid   = elem["properties"]['system:index']
            else:
                uid   = elem["properties"]['LANDSAT_ID']
        except KeyError:
            ipdb.set_trace()
        info.append({"satellite":sat, "time":utime, "unid":uid })
    # ========== convert dates to pandas dataframe ==========
    df = pd.DataFrame(info)
    df["date"] = pd.to_datetime(df["time"], unit='ms', origin='unix')
    # df.to_csv("%s%s/%s_%s_%s_timeinfo.csv" % (spath, coords["name"], dsinfom, coords["name"], dsbands))
    # coords.to_csv("%s%s/%s_%s_gridinfo.csv" % (spath, coords["name"], program, coords["name"]))
    # sys.exit()
    # gee_batch.imagecollection.toDrive(
    # 	collection,
    # 	"FIREFLIES_geotifs" ,
    # 	namePattern='%s_%s_%s_%s_{system_date}_{id}' % (dsbands, dsinfom, coords["name"], dsbands),
    # 	region=geom,
    # 	crs = "EPSG:4326",
    # 	fileFormat='GeoTIFF'
    # 	)
    print("Starting to create GeoTIFF's for %s at:" % coords["name"], pd.Timestamp.now())
    print("Attempting manual creation")
    # ========== Convert the collection into a selection of images
    img_list = collection.toList(collection.size())
    for nx, info in df.iterrows():
        # ========== Built to allow for scripts to be redone ==========
        # When `fails` is given, only re-export those scene indices.
        if not fails is None:
            if not nx in fails:
                continue
        # ========== convert the datatype ==========
        img = ee.Image(img_list.get(nx)).toFloat()
        # ========== Create the name and path ==========
        name   = '%s_%s_%s_%04d' % (dsinfom, coords["name"], dsbands, nx)
        folder = "FIREFLIES_geotifs"
        string = "\r Sending image %d of %d to the cloud for processing" % (nx, df.index.max())
        sys.stdout.write(string)
        sys.stdout.flush()
        # ========== Send the task to the cloud ==========
        # Same scalar-vs-series fallback as for the polygon above.
        try:
            task = ee.batch.Export.image.toDrive(
                image=img,
                description=name,
                folder=folder,
                crs = "EPSG:4326",
                region=(
                    [coords.lonr_min[0],coords.latr_min[0]],
                    [coords.lonr_max[0],coords.latr_min[0]],
                    [coords.lonr_max[0],coords.latr_max[0]],
                    [coords.lonr_min[0],coords.latr_max[0]]),
                scale=30,
                fileFormat='GeoTIFF')
        except:
            task = ee.batch.Export.image.toDrive(
                image=img,
                description=name,
                folder=folder,
                crs = "EPSG:4326",
                region=(
                    [coords.lonr_min,coords.latr_min],
                    [coords.lonr_max,coords.latr_min],
                    [coords.lonr_max,coords.latr_max],
                    [coords.lonr_min,coords.latr_max]),
                scale=30,
                fileFormat='GeoTIFF')
        # GEE limits concurrent tasks; on failure sleep ~an hour, retry once.
        try:
            process = batch.Task.start(task)
        except Exception as er:
            sle = 0
            print(str(er))
            warn.warn("Hit a task limit, sleeping for an hour to let tasks complete")
            while sle < 61:
                sle += 1
                string = "\r Starting sleep number %d at %s" % (sle, str(pd.Timestamp.now()))
                sys.stdout.write(string)
                sys.stdout.flush()
                time.sleep(60)
            process = batch.Task.start(task)
    # sys.exit()
    # ========== Code for old video export ==========
    # Retained (disabled) video-export path; flip oldvideo to True to use it.
    oldvideo = False
    if oldvideo:
        # This is the way to use the google earth engine to make videos, i've
        # left the code here in case i need it again in the future
        ## Convert bands to output video
        if dschoice == "TOA":
            outputVideo = collection.map(convertBit)
        else:
            outputVideo = collection.map(convertBitV2)
        if len(bandlist)> 3:
            outputVideo = outputVideo.select(['B3', 'B2', 'B1'])
        testfirst = False
        if testfirst:
            task_config = {
                # 'description': 'imageToDriveTestExample',
                'scale': 30,
                'region': geom,
                "crs" : "EPSG:4326",
                "fileFormat":'GeoTIFF'
                }
            task = batch.Export.image.toDrive(outputVideo.first(), "testimage", task_config)
            task.start()
        # Export video to Google Drive
        print("Starting to create video for %s at:" % coords["name"], pd.Timestamp.now())
        out = batch.Export.video.toDrive(
            outputVideo, description='%s_%s_%s' % (dsinfom, coords["name"], dsbands),
            folder = "/GEE_VIDEO_EXPORTS",
            framesPerSecond = 1, #dimensions = 1080,
            region=(
                [coords.lonr_min[0],coords.latr_min[0]],
                [coords.lonr_max[0],coords.latr_min[0]],
                [coords.lonr_max[0],coords.latr_max[0]],
                [coords.lonr_min[0],coords.latr_max[0]]),
            crs = "EPSG:4326",
            maxFrames=10000)
        process = batch.Task.start(out)
        print("Process sent to cloud")
    if fails is None:
        # ========== Save out the relevant infomation ==========
        df.to_csv("%s%s/%s_%s_%s_timeinfo.csv" % (spath, coords["name"], dsinfom, coords["name"], dsbands))
        coords.to_csv("%s%s/%s_%s_gridinfo.csv" % (spath, coords["name"], program, coords["name"]))
        # ========== Going to sleep to give GEE a rest before i slam it with new requests ==========
        print("\n Starting 20 minutes of sleep at", pd.Timestamp.now(), "\n")
        sle = 0
        while sle < 20:
            sle += 1
            string = "\r Starting sleep number %d at %s" % (sle, str(pd.Timestamp.now()))
            sys.stdout.write(string)
            sys.stdout.flush()
            time.sleep(60)
#==============================================================================
#==============================================================================
#==============================================================================
## Make 8 bit data
def geom_builder(site = "Burn2015 UP"):
    """
    Build the per-site bounding-box table used for the GEE exports.

    Collects field sites from the 2017/2018/2019 campaigns, de-duplicates
    sites that lie within 1 km of an earlier one, then for every remaining
    site derives pixel-aligned bounding boxes from three local raster
    datasets (COPERN 5x5, COPERN_BA 5x5, MODIS 3x3 grid cells).

    Returns a pandas DataFrame (one row per site) with the site name,
    sample year, centre lon/lat and the per-dataset box edges.
    """
    # ========== Load the site data ==========
    pointfn = "./data/field/Points.kml"
    pointdt = gpd.read_file(pointfn, driver="kml")
    sitenm = []
    latit  = []
    longi  = []
    year   = []
    # ========== Loop over the names 2019 ==========
    # Keep the two special 2019 sites plus every transect-start ("-0") point.
    for nm in pointdt.Name:
        if nm in ["Burn2015 UP", "GROUP BOX2 TRANS1-6"]:
            sitenm.append(nm)
            latit.append(pointdt[pointdt.Name == nm].geometry.y.values[0])
            longi.append(pointdt[pointdt.Name == nm].geometry.x.values[0])
            year.append(2019)
        elif "GROUP BOX" in nm:
            pass
        elif nm[-2:] == '-0':
            sitenm.append(nm)
            latit.append(pointdt[pointdt.Name == nm].geometry.y.values[0])
            longi.append(pointdt[pointdt.Name == nm].geometry.x.values[0])
            year.append(2019)
    # ========== add 2018 ==========
    fd18 = pd.read_csv("./data/field/2018data/siteDescriptions18.csv")
    fd18.sort_values(by=["site number"],inplace=True)
    for nx, row in fd18.iterrows():
        sitenm.append("Site%02d" % row["site number"])
        latit.append(row.lat)
        longi.append(row.lon)
        year.append(2018)
    # ========== add 2017 ==========
    # Only add 2017 sites whose name was not already used in 2018.
    fd17 = pd.read_csv("./data/field/2017data/siteDescriptions17.csv")
    fd17.sort_values(by=["site number"],inplace=True)
    for nx, row in fd17.iterrows():
        stnm = "Site%02d" % row["site number"]
        if not stnm in sitenm:
            sitenm.append(stnm)
            latit.append(row.strtY)
            longi.append(row.strtX)
            year.append(2017)
    # ========== Check the distance ==========
    # def distance_check(p1, p2):
    # 	pass
    # Mark any site within 1 km of an earlier site as a duplicate (Skip>0).
    # NOTE(review): `easy` is not defined in this excerpt -- presumably a
    # small helper defined elsewhere in the file; confirm.
    STdf = pd.DataFrame({"siteds":sitenm, "lat":latit , "lon":longi , "year":year})
    STdf["Skip"] = 0
    STdf["SkipSite"] = ""
    for nx, row in STdf.iterrows():
        if STdf["Skip"][nx] > 0:
            # THis location has laready been skipped
            continue
        else:
            dist = np.array([geodis.distance((row.lat, row.lon), (lat, lon)).km for lat, lon in zip(STdf.lat[nx+1:].values, STdf.lon[nx+1:].values)])
            STdf["Skip"][nx+1:] += (dist<1).astype(int)
            close = [easy(inp, row.siteds) for inp in (dist<1)]
            STdf["SkipSite"][nx+1:] = STdf["SkipSite"][nx+1:].values + close
    # ipdb.set_trace()
    df = STdf[STdf.Skip == 0].reset_index(drop=True)
    def _sitemaker(site, sampleset, ds, dsn, sitinfoLS, lat, lon):
        """ wrapper to pull out site info as needed

        Adds/extends the OrderedDict entry for `site` with the pixel-aligned
        box edges of dataset `dsn` (reads the grid from the enclosing
        `ds_gr`). COPERN initialises the entry; later datasets fill their
        own lon/lat min/max slots.
        """
        # ========== Pull out the location of a point ==========
        # lon = pointdt[pointdt.Name == site].geometry.x.values
        # lat = pointdt[pointdt.Name == site].geometry.y.values
        # ========== Check if the site has already been built ==========
        if dsn == "COPERN": # The site has not been built yet
            # ========== set the key params ==========
            boxs  = 5    # number of grid cells considered
            ident = "r"  # The indertifing code of the dataset
            # ========== Create a container ==========
            coords = OrderedDict()
            # ========== Get values ready to export ==========
            # Shorten the two awkward 2019 site names for file naming.
            if site == "Burn2015 UP":
                coords["name"] = "TestBurn"
            elif site == "GROUP BOX2 TRANS1-6":
                coords["name"] = "G2T1-6"
            else:
                coords["name"] = site
            coords["set"]  = sampleset
            coords["lon"]  = lon
            coords["lat"]  = lat
            # ========== Build the empty parts of the Ordered dic ==========
            for va_nm in ["r", "b_COP", "b_MOD"]:
                for ll in ["lon", "lat"]:
                    for mm in ["max", "min"]:
                        coords["%s%s_%s" % (ll, va_nm, mm)] = 0
        else:
            if dsn == "MODIS":
                boxs  = 3        # number of grid cells considered
                ident = "b_MOD"  # The indertifing code of the dataset
            elif dsn == "COPERN_BA":
                boxs  = 5        # number of grid cells considered
                ident = "b_COP"  # The indertifing code of the dataset
            coords = sitinfoLS[site]
        # Nearest grid cell to the site centre.
        gr_bx = ds_gr.sel({"latitude":lat, "longitude":lon}, method="nearest")
        # ========== Work out the edges of the grid box ==========
        latstep = abs(np.unique(np.round(np.diff(ds_gr.latitude.values), decimals=9)))/2.0
        lonstep = abs(np.unique(np.round(np.diff(ds_gr.longitude.values), decimals=9)))/2.0
        # Expand the centre cell out to a 3x3 or 5x5 pixel window.
        if boxs == 3:
            coords["lon%s_max" % ident] = float((gr_bx.longitude.values + (lonstep*2)) + lonstep)
            coords["lon%s_min" % ident] = float((gr_bx.longitude.values - (lonstep*2)) - lonstep)
            coords["lat%s_max" % ident] = float((gr_bx.latitude.values  + (latstep*2)) + latstep)
            coords["lat%s_min" % ident] = float((gr_bx.latitude.values  - (latstep*2)) - latstep)
            # ipdb.set_trace()
        elif boxs == 5:
            coords["lon%s_max" % ident] = float((gr_bx.longitude.values + 2*(lonstep*2)) + lonstep)
            coords["lon%s_min" % ident] = float((gr_bx.longitude.values - 2*(lonstep*2)) - lonstep)
            coords["lat%s_max" % ident] = float((gr_bx.latitude.values  + 2*(latstep*2)) + latstep)
            coords["lat%s_min" % ident] = float((gr_bx.latitude.values  - 2*(latstep*2)) - latstep)
        sitinfoLS[site] = coords
        return sitinfoLS #coords
    # ========== setup an ordered dict of the names ==========
    # NOTE(review): `datasets()` is defined elsewhere in this file.
    sitinfoLS = OrderedDict()
    local_data = datasets()
    for dsn in ["COPERN", "COPERN_BA", "MODIS"]:
        print(dsn)
        ldsi = local_data[dsn]
        # ========== load in the grid data ==========
        if os.path.isfile(ldsi["fname"]):
            ds_gr = xr.open_dataset(
                ldsi["fname"],
                chunks=ldsi["chunks"])[ldsi["var"]].rename(ldsi["rename"]).isel(time=0)
        else:
            ipdb.set_trace()
        # for nm in sitenm:
        for nx, row in df.iterrows():
            sitinfoLS = _sitemaker(row.siteds, row.year, ds_gr, dsn, sitinfoLS, row.lat, row.lon)
        # ========== Close the dataset ==========
        ds_gr = None
    # ipdb.set_trace()
    return pd.DataFrame(sitinfoLS).transpose()[sitinfoLS["Burn2015 UP"].keys()]
def Field_data(year = 2018):
    """
    Load the field-site description table for a given campaign year.

    Parameters
    ----------
    year : int
        Field campaign year; 2018 (the default) loads the 2018 sheet,
        any other value loads the 2017 sheet.

    Returns
    -------
    pd.DataFrame
        Indexed by site number ("sn"), with lat, lon, recruitment class
        ("RF") and the estimated fire year per site.
    """
    # ========== Load in the relevant data ==========
    if year == 2018:
        fd18 = pd.read_csv("./data/field/2018data/siteDescriptions18.csv")
    else:
        # BUGFIX: the 2017 site descriptions live under 2017data/ (the same
        # path geom_builder reads), not under 2018data/ as previously coded.
        fd18 = pd.read_csv("./data/field/2017data/siteDescriptions17.csv")
    fd18.sort_values(by=["site number"],inplace=True)
    # ========== Create and Ordered Dict for important info ==========
    info = OrderedDict()
    info["sn"] = fd18["site number"]
    # The 2017 and 2018 sheets use different column names for the same data.
    try:
        info["lat"] = fd18.lat
        info["lon"] = fd18.lon
        info["RF"]  = fd18.rcrtmnt
    except AttributeError:
        info["lat"] = fd18.strtY
        info["lon"] = fd18.strtX
        info["RF"]  = fd18.recruitment
    # _fireyear (defined elsewhere in this file) converts the free-text
    # "estimated fire year" column into a usable value.
    info["fireyear"] = [_fireyear(fyv) for fyv in fd18["estimated fire year"].values]
    # ========== Convert to dataframe and replace codes ==========
    RFinfo = pd.DataFrame(info).set_index("sn")
    return RFinfo
def string_format(string, replacement):
    """ Format a string using variables (as str.format).

    Server-side (Earth Engine) analogue of str.format: every "{key}"
    placeholder in `string` is replaced by the matching value from the
    `replacement` dict, entirely with ee.String operations.
    """
    def wrap(kv, ini):
        # BUGFIX: this helper was missing, so the z.iterate() call below
        # raised NameError. kv is a server-side [key, value] pair; replace
        # "{key}" with value in the accumulated string `ini`.
        kv = ee.List(kv)
        key = ee.String(kv.get(0))
        value = ee.String(kv.get(1))
        pattern = ee.String('{').cat(key).cat(ee.String('}'))
        return ee.String(ini).replace(pattern, value)
    s = ee.String(string)
    repl = ee.Dictionary(replacement)
    keys = repl.keys()
    values = repl.values().map(lambda v: ee.Algorithms.String(v))
    z = keys.zip(values)
    newstr = z.iterate(wrap, s)
    return ee.String(newstr)
def convertDataType(newtype):
	""" Convert an image to the specified data type

	:param newtype: the data type. One of 'float', 'int', 'byte', 'double',
		'Uint8','int8','Uint16', 'int16', 'Uint32','int32'
	:type newtype: str
	:return: a function to map over a collection
	:rtype: function
	"""
	# BUGFIX: the mapped function `wrap` was returned but never defined,
	# so calling the result always raised a NameError. Reinstated the
	# dispatch over the image conversion methods listed in the docstring.
	def wrap(image):
		TYPES = {
			'float': image.toFloat,
			'int': image.toInt,
			'byte': image.toByte,
			'double': image.toDouble,
			'Uint8': image.toUint8,
			'int8': image.toInt8,
			'Uint16': image.toUint16,
			'int16': image.toInt16,
			'Uint32': image.toUint32,
			'int32': image.toInt32,
		}
		return TYPES[newtype]()
	return wrap
#==============================================================================
if __name__ == '__main__':
	# ========== Set the args Description ==========
	description = 'Script to make movies'
	parser = argparse.ArgumentParser(description=description)

	# ========== Add additional arguments ==========
	parser.add_argument(
		"-s", "--site", type=str, default=None, help="Site to work with ")
	parser.add_argument(
		"-f", "--force", action="store_true",
		help="the max partnumber that has not been redone")
	args = parser.parse_args()

	# ========== Call the main function ==========
	main(args)
else:
	# BUGFIX: the original else-branch duplicated the whole argument
	# parsing block and called main(args) again, so merely importing this
	# module from another script parsed THAT script's command line and ran
	# the movie maker as an import side effect. Now it only warns.
	warn.warn("called from another script")
| [
37811,
198,
7391,
3061,
11,
220,
198,
198,
14402,
503,
262,
23645,
4534,
3113,
284,
766,
644,
1312,
460,
466,
198,
197,
12,
1064,
257,
8604,
265,
4947,
329,
257,
2060,
966,
220,
198,
198,
37811,
198,
2,
23926,
25609,
855,
198,
198,
... | 2.492777 | 7,338 |
"""
Problem Description:
Draw the pattern specified using loops.
Input:
Output:
The pattern as shown in sample output.
First line contain 4 space and 1 star, next line contains 3 space and 3 stars and so on..
Constraints:
Try not to hardcode the output.
Sample Input:
Sample Output:
*
***
*****
*******
*********
"""
for i in range(5):
print(" " * (4 - i), end = "")
print("*" * ((2 * i) + 1))
| [
37811,
198,
40781,
12489,
25,
198,
25302,
262,
3912,
7368,
1262,
23607,
13,
198,
198,
20560,
25,
198,
198,
26410,
25,
198,
464,
3912,
355,
3402,
287,
6291,
5072,
13,
198,
5962,
1627,
3994,
604,
2272,
290,
352,
3491,
11,
1306,
1627,
... | 2.943662 | 142 |
# Generated by Django 3.0.2 on 2020-01-20 14:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
17,
319,
12131,
12,
486,
12,
1238,
1478,
25,
1314,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
142... | 3.019231 | 52 |
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Unit tests for OpenStack Cinder volume drivers."""
import mock
from hplefthandclient import exceptions as hpexceptions
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder import units
from cinder.volume.drivers.san.hp import hp_lefthand_iscsi
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
| [
2,
220,
220,
220,
357,
66,
8,
15069,
1946,
30446,
15503,
12,
11869,
446,
7712,
5834,
11,
406,
13,
47,
13,
198,
2,
220,
220,
220,
1439,
6923,
33876,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362... | 3.356725 | 342 |
from collections import Counter
# import matplotlib.pyplot as plt
import os
import scipy.stats
from sklearn.metrics import make_scorer
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RandomizedSearchCV
from sklearn_crfsuite import CRF
from sklearn_crfsuite import scorers
from sklearn_crfsuite import metrics
| [
6738,
17268,
1330,
15034,
198,
2,
1330,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28686,
198,
11748,
629,
541,
88,
13,
34242,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
787,
62,
1416,
11934,
198,
6738,
1341,
35... | 3.455446 | 101 |
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import Queue
import threading
from translators.google import Google
from translators.ut import UT
from translators.tilde import Tilde
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
220,
532,
9,
12,
21004,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
4670,
518,
198,
11748,
4704,
278,
198,
198,
6738,
4779,
2024,
13,
13297,
1330,
3012,
198,
6738,
4779,
2024,
... | 3.05 | 60 |
'''
Specialized Instructions.
Copyright (c) 2019-2020 Mit authors
The package is distributed under the MIT/X11 License.
THIS PROGRAM IS PROVIDED AS IS, WITH NO WARRANTY. USE IS AT THE USER’S
RISK.
'''
from dataclasses import dataclass
from code_util import Code
from action import Action, ActionEnum
from spec import Instructions, ExtraInstructions
from stack import StackEffect, Size
@dataclass
class Instruction(Action):
    '''
    Specialized VM instruction descriptor.

     - guard - str - C expression which must evaluate to true for the
       specialized instruction to be executed. The guard may assume that the
       stack contains enough items to read `effect.args`.

    Specialized instructions have only one control flow path. Instructions with
    more than one control flow path are modelled as several specialized
    instructions with the same opcode and different guard expressions; the
    guards for a particular opcode must be exclusive.

    Specialized instructions cannot be variadic.
    '''
    # C guard expression controlling whether this specialization runs
    # (see class docstring).
    guard: str
    # NOTE(review): presumably marks instructions after which execution
    # cannot fall through to the next instruction — confirm against the
    # Action base class.
    terminal: bool=False
def _replace_items(picture, replacement):
'''
Replaces 'ITEMS' with `replacement` in `picture`
'''
ret = []
for item in picture.items:
if item.size.count != 0:
ret.extend(replacement)
else:
ret.append(item.name)
return ret
# Build the full table of specialized instructions from the generic spec.
specialized_instructions = {}
for instruction in Instructions:
    # Variadic instructions are specialized once per supported operand
    # count (0-3), e.g. FOO -> FOO_WITH_0 .. FOO_WITH_3.
    # NOTE(review): _gen_variadic_instruction() and
    # _gen_ordinary_instruction() are not visible in this part of the
    # file — confirm they are defined earlier in the module.
    if instruction.action.action.is_variadic:
        for count in range(4):
            specialized_instructions[f'{instruction.name}_WITH_{count}'] = \
                _gen_variadic_instruction(instruction, count)
    # Non-variadic instructions with a known stack effect specialize 1:1.
    elif instruction.action.action.effect is not None:
        specialized_instructions[instruction.name] = \
            _gen_ordinary_instruction(instruction)

# Rebind `Instructions` to the specialized set; from here on the module
# exposes only the specialized opcodes.
Instructions = ActionEnum(
    'Instructions',
    specialized_instructions,
)

# The set of Instructions that might modify the `ir` register.
# We cannot guess beyond such an instruction.
GUESS_LIMITING = frozenset([
    Instructions.NEXT,
    Instructions.JUMP,
    Instructions.JUMPZ,
])
| [
7061,
6,
198,
13409,
1143,
27759,
13,
198,
198,
15269,
357,
66,
8,
13130,
12,
42334,
11707,
7035,
198,
198,
464,
5301,
318,
9387,
739,
262,
17168,
14,
55,
1157,
13789,
13,
198,
198,
43559,
46805,
3180,
36592,
2389,
1961,
7054,
3180,
... | 2.971671 | 706 |
"""
apps_endpoints.py
CLI integration tests for "apps-*" cli commands.
"""
import json
import os
from flask import jsonify, request
from flask_restful import Resource
from .response_templates import response_template_to_json
# Sample response for "apps-list" cli command.
# Loaded once at import time from the bundled JSON response template.
apps_list_response = response_template_to_json("apps-list.json")
class AgaveApps(Resource):
    """ Test apps-* cli commands
    """

    def get(self):
        """ Test apps-list utility

        This test emulates the Agave API endpoint "/apps/v2/" for GET
        requests. To test it:

        curl -sk -H "Authorization: Bearer xxx" 'https://localhost:5000/apps/v2?pretty=True'
        """
        # The "pretty" query argument is part of the emulated API's
        # interface but the response does not depend on it; the dead
        # local that read it was removed.
        return jsonify(apps_list_response)
| [
37811,
198,
220,
220,
220,
6725,
62,
437,
13033,
13,
9078,
198,
198,
5097,
40,
11812,
5254,
329,
366,
18211,
12,
9,
1,
537,
72,
9729,
13,
198,
37811,
198,
11748,
33918,
198,
11748,
28686,
198,
6738,
42903,
1330,
33918,
1958,
11,
258... | 2.780576 | 278 |
from .classifier import DL85Classifier
| [
6738,
764,
4871,
7483,
1330,
23641,
5332,
9487,
7483,
198
] | 3.9 | 10 |
# -*- coding: utf-8 -*-
"""
ModelLink
=========
Provides the definition of the :class:`~ModelLink` abstract base class.
"""
# %% IMPORTS
# Built-in imports
import abc
from glob import glob
from os import path
from tempfile import mktemp
import warnings
# Package imports
import e13tools as e13
import h5py
import hickle
import numpy as np
from sortedcontainers import SortedDict as sdict, SortedSet as sset
# PRISM imports
from prism import __version__
from prism._docstrings import std_emul_i_doc
from prism._internal import (
FeatureWarning, RequestWarning, check_vals, getCLogger, np_array)
from prism.modellink.utils import convert_data, convert_parameters
# All declaration
__all__ = ['ModelLink']
# %% MODELLINK CLASS DEFINITION
# TODO: Allow for inter-process methods?
# Like, having a method that is called before/after construction.
class ModelLink(object, metaclass=abc.ABCMeta):
"""
Provides an abstract base class definition that allows the
:class:`~prism.Pipeline` class to be linked to any model/test
object of choice. Every model wrapper used in the
:class:`~prism.Pipeline` class must be an instance of the
:class:`~ModelLink` class.
Description
-----------
The :class:`~ModelLink` class is an abstract base class, which forms the
base for wrapping a model and allowing *PRISM* to use it effectively.
Because it is mandatory for every model to be wrapped in a user-made
:class:`~ModelLink` subclass, several tools are provided to the user to
make this as versatile as possible.
The :class:`~ModelLink` class uses three properties that define the way the
subclass will be used by *PRISM*: :attr:`~name`, :attr:`~call_type` and
:attr:`~MPI_call`. The first defines what the name of the subclass is,
which is used by *PRISM* to identify the subclass with and check if one did
not use a different subclass by accident. The other two are flags that
determine how the :meth:`~call_model` method should be used. These three
properties can be set anywhere during the initialization of the
:class:`~ModelLink` subclass, or are set to a default value if they are not
modified.
Every :class:`~ModelLink` subclass needs to be provided with two different
data sets: `model parameters` and `model data`. The model parameters define
which parameters the model can take, what their names are and in what value
range each parameter must be. The model data on the other hand, states
where in a model realization a data value must be retrieved and compared
with a provided observational value. One can think of the model data as the
observational constraints used to calculate the likelihood in a Bayesian
analysis.
The model parameters and model data can be set in two different ways. They
can be hard-coded into the :class:`~ModelLink` subclass by altering the
:meth:`~get_default_model_parameters` and :meth:`~get_default_model_data`
methods or set by providing them during class initialization. A combination
of both is also possible. More details on this can be found in
:meth:`~__init__`.
The :class:`~ModelLink` class has two abstract methods that must be
overridden before the subclass can be initialized.
The :meth:`~call_model` method is the most important method, as it provides
*PRISM* with a way of calling the model wrapped in the :class:`~ModelLink`
subclass. The :meth:`~get_md_var` method allows for *PRISM* to calculate
the model discrepancy variance.
Notes
-----
The :meth:`~__init__` method may be extended by the :class:`~ModelLink`
subclass, but the superclass version must always be called.
If required, one can use the :func:`~prism.modellink.test_subclass`
function to test a :class:`~ModelLink` subclass on correct functionality.
"""
def __init__(self, *, model_parameters=None, model_data=None):
"""
Initialize an instance of the :class:`~ModelLink` subclass.
Optional
--------
model_parameters, model_data : array_like, dict, str or None.\
Default: None
Anything that can be converted to a dict that provides non-default
model parameters/data information or *None* if only default
information is used from :meth:`~get_default_model_parameters` or
:meth:`~get_default_model_data`. For more information on the
lay-out of these dicts, see ``Notes``.
If array_like, dict(`model_parameters`/`model_data`) must generate
a dict with the correct lay-out.
If dict, the dict itself must have the correct lay-out.
If str, the string must be the path to a file containing the dict
keys in the first column and the dict values in the second column,
which combined generate a dict with the correct lay-out.
Notes (model_parameters)
------------------------
The model parameters provides this :class:`~ModelLink` subclass with
the names, ranges and estimates of all model parameters that need to be
explored.
The model parameters dict requires to have the name of the parameters
as the keyword, and a 1D list containing the lower bound, the upper
bound and, if applicable, the estimate of this parameter. It is not
required to provide an estimate for every parameter. The estimates are
used to draw illustrative lines when making projection figures.
An example of a model parameters file can be found in the 'data' folder
of the *PRISM* package. If required, one can use the
:func:`~prism.modellink.convert_parameters` function to validate their
formatting.
Formatting :
``{par_name: [lower_bnd, upper_bnd, par_est]}``
Notes (model_data)
------------------
The model data provides this :class:`~ModelLink` subclass with the
observational data points that need to be used to constrain this model
with.
The model data dict requires to have the data identifiers
(:attr:`~data_idx`) as the keyword, and a 1D list containing the data
value (:attr:`~data_val`); the data errors (:attr:`~data_err`) and the
data space (:attr:`~data_spc`).
If the data errors are given with one value, then the data points are
assumed to have a centered :math:`1\\sigma`-confidence interval. If the
data errors are given with two values, then the data points are
assumed to have a :math:`1\\sigma`-confidence interval defined by the
provided upper and lower errors.
The data spaces are one of five strings ({'lin', 'log' or 'log_10',
'ln' or 'log_e'}) indicating in which of the three value spaces
(linear, log, ln) the data values are. It defaults to 'lin' if it is
not provided.
The data identifier is a sequence of bools, ints, floats and strings
that is unique for every data point. *PRISM* uses it to identify a data
point with, which is required in some cases (like MPI), while the model
itself can use it as a description of the operations required to
extract the data point from the model output. It can be provided as any
sequence of any length for any data point. If any sequence contains a
single element, it is replaced by just that element instead of a tuple.
A simple example of a data identifier is :math:`f(\\text{data_idx}) =
\\text{data_val}`, where the output of the model is given by
:math:`f(x)`.
An example of a model data file can be found in the 'data' folder of
the *PRISM* package. If required, one can use the
:func:`~prism.modellink.convert_data` function to validate their
formatting.
Formatting :
``{(data_idx_0, data_idx_1, ..., data_idx_n): [data_val,`` \
``data_err, data_spc]}`` \n
**or** \n
``{(data_idx_0, data_idx_1, ..., data_idx_n): [data_val,`` \
``upper_data_err, lower_data_err, data_spc]}``
"""
# Save name of this class if not saved already
if not hasattr(self, '_name'):
self.name = self.__class__.__name__
# Set call_type to default ('single') if not modified before
if not hasattr(self, '_call_type'):
self.call_type = 'single'
# Set MPI_call to default (False) if not modified before
if not hasattr(self, '_MPI_call'):
self.MPI_call = False
# Generate model parameter properties
self.__set_model_parameters(model_parameters)
# Generate model data properties
self.__set_model_data(model_data)
    # Define the representation of a ModelLink object
    # NOTE(review): the __repr__ definition this comment announces is not
    # present here — it appears to have been lost; confirm against the
    # original source.

    # %% CLASS PROPERTIES
    # General
    @property
    def name(self):
        """
        str: Name associated with an instance of this :class:`~ModelLink`
        subclass.

        By default, it is set to the name of this :class:`~ModelLink`
        subclass. Can be manually manipulated to allow for more user
        control.
        """
        return(self._name)

    # NOTE(review): the bodies of the property setters below are missing;
    # as written, each `@<prop>.setter` line stacks onto the following
    # property definition, which cannot be the intended behavior. Restore
    # the setter functions from the original source.
    @name.setter

    @property
    def single_call(self):
        """
        bool: Whether :meth:`~call_model` can/should be supplied with a
        single evaluation sample. At least one of :attr:`~single_call` and
        :attr:`~multi_call` must be *True*.

        By default, single model calls are requested (True).
        """
        return(bool(self._single_call))

    @property
    def multi_call(self):
        """
        bool: Whether :meth:`~call_model` can/should be supplied with a set
        of evaluation samples. At least one of :attr:`~single_call` and
        :attr:`~multi_call` must be *True*.

        By default, single model calls are requested (False).
        """
        return(bool(self._multi_call))

    @property
    def call_type(self):
        """
        str: String indicating whether :meth:`call_model` should be
        supplied with a single evaluation sample ('single') or a set of
        samples ('multi'), or can be supplied with both ('hybrid').

        By default, single model calls are requested ('single').
        """
        return(self._call_type)

    @call_type.setter

    @property
    def MPI_call(self):
        """
        bool: Whether :meth:`~call_model` can/should be called by all MPI
        ranks simultaneously instead of by the controller.

        By default, only the controller rank calls the model (False).
        """
        return(bool(self._MPI_call))

    @MPI_call.setter
    # Model Parameters
    @property
    def n_par(self):
        """
        int: Number of model parameters.
        """
        return(self._n_par)

    @property
    def par_name(self):
        """
        list of str: List with model parameter names.
        """
        return(self._par_name)

    @property
    def par_rng(self):
        """
        dict of :obj:`~numpy.ndarray`: The lower and upper values of the
        model parameters, keyed on parameter name.
        """
        # Pair every parameter name with its [lower, upper] range row
        return(sdict(zip(self._par_name, self._par_rng)))

    @property
    def par_est(self):
        """
        dict of {float, None}: The user-defined estimated values of the
        model parameters, keyed on parameter name. Contains *None* in
        places where estimates were not provided.
        """
        return(sdict(zip(self._par_name, self._par_est)))
@property
def model_parameters(self):
"""
dict of list: The dict of model parameters as used by this
:obj:`~ModelLink` instance.
This dict can be used as the `model_parameters` argument when
initializing this :class:`~ModelLink` subclass.
"""
# Initialize empty dict of model parameters
model_parameters = sdict()
# Loop over all parameter properties and add them to the dict
for name, rng, est in zip(self._par_name, self._par_rng,
self._par_est):
# If estimate was not given, only add the parameter range
if est is None:
model_parameters[name] = [*rng]
# Else, add range and estimate
else:
model_parameters[name] = [*rng, est]
# Return model_parameters
return(model_parameters)
    # Model Data
    # All data dicts below are keyed on the data identifiers (data_idx).
    @property
    def n_data(self):
        """
        int: Number of provided data points.
        """
        return(self._n_data)

    @property
    def data_val(self):
        """
        dict of float: The values of provided data points.
        """
        return(dict(zip(self._data_idx, self._data_val)))

    @property
    def data_err(self):
        """
        dict of float: The upper and lower :math:`1\\sigma`-confidence
        levels of provided data points.
        """
        return(dict(zip(self._data_idx, self._data_err)))

    @property
    def data_spc(self):
        """
        dict of str: The types of value space ({'lin', 'log', 'ln'}) of
        provided data points.
        """
        return(dict(zip(self._data_idx, self._data_spc)))

    @property
    def data_idx(self):
        """
        list of tuples: The user-defined data point identifiers.
        """
        return(self._data_idx)
@property
def model_data(self):
"""
dict of list: The dict of model data points as used by this
:obj:`~ModelLink` instance.
This dict can be used as the `model_data` argument when initializing
this :class:`~ModelLink` subclass.
"""
# Initialize empty dict of model data
model_data = {}
# Combine data points together, only adding non-default values to dict
for idx, val, err, spc in zip(self._data_idx, self._data_val,
self._data_err, self._data_spc):
# Create data point
data_point = [val]
# Add data error, add only one value if error is centered
if(err[0] == err[1]):
data_point.append(err[0])
else:
data_point.extend(err)
# Add data space if it is not 'lin'
if(spc != 'lin'):
data_point.append(spc)
# Add data_point to model_data dict
model_data[idx] = data_point
# Return model_data
return(model_data)
# %% GENERAL CLASS METHODS
# This function returns non-default string representations of input args
def get_str_repr(self):
"""
Returns a list of string representations of all additional input
arguments with which this :class:`~ModelLink` subclass was initialized.
"""
return([])
# This function converts values in unit space to parameter space
def _to_par_space(self, sam_set):
"""
Converts provided `sam_set` from unit space ([0, 1]) to parameter space
([lower_bnd, upper_bnd]).
"""
return(self._par_rng[:, 0]+sam_set*(self._par_rng[:, 1] -
self._par_rng[:, 0]))
# This function converts values in parameter space to unit space
def _to_unit_space(self, sam_set):
"""
Converts provided `sam_set` from parameter space ([lower_bnd,
upper_bnd]) to unit space ([0, 1]).
"""
return((sam_set-self._par_rng[:, 0]) /
(self._par_rng[:, 1]-self._par_rng[:, 0]))
    # This function converts a sequence of model parameter names/indices
    def _get_model_par_seq(self, par_seq, name):
        """
        Converts a provided sequence `par_seq` of model parameter names and
        indices to a list of indices, removes duplicates and checks if every
        provided name/index is valid.

        Parameters
        ----------
        par_seq : 1D array_like of {int, str}
            A sequence of integers and strings determining which model
            parameters need to be used for a certain operation.
        name : str
            A string stating the name of the variable the result of this
            method will be stored in. Used for error messages.

        Returns
        -------
        par_seq_conv : list of int
            The provided sequence `par_seq` converted to a sorted list of
            model parameter indices.
        """
        # Do some logging
        logger = getCLogger('INIT')
        logger.info("Converting sequence of model parameter names/indices.")

        # Remove all unwanted characters from the string and split it up
        par_seq = e13.split_seq(par_seq)

        # Check elements if they are ints or strings, and if they are valid
        for i, par_idx in enumerate(par_seq):
            try:
                # If par_idx is a string, try to use it as a parameter name
                if isinstance(par_idx, str):
                    par_seq[i] = self._par_name.index(par_idx)
                # If not, try to use it as a parameter index
                else:
                    self._par_name[par_idx]
                    # Wrap negative indices around so the stored index is
                    # always non-negative
                    par_seq[i] = par_idx % self._n_par
            # If any operation above fails, raise error
            except Exception as error:
                err_msg = ("Input argument %r[%i] is invalid! (%s)"
                           % (name, i, error))
                e13.raise_error(err_msg, e13.InputError, logger)

        # If everything went without exceptions, check if list is not empty
        # and remove duplicates
        if par_seq:
            # sset (SortedSet) both de-duplicates and sorts the indices
            par_seq = list(sset(par_seq))
        else:
            err_msg = "Input argument %r is empty!" % (name)
            e13.raise_error(err_msg, ValueError, logger)

        # Log end
        logger.info("Finished converting sequence of model parameter "
                    "names/indices.")

        # Return it
        return(par_seq)
    # Returns the hypercube that encloses provided sam_set
    def _get_sam_space(self, sam_set):
        """
        Returns the boundaries of the hypercube that encloses the parameter
        space in which the provided `sam_set` is defined.

        The main use for this function is to determine what part of model
        parameter space was likely sampled from in order to obtain the
        provided `sam_set`. Because of this, extra spacing is added to the
        boundaries to reduce the effect of the used sampling method.

        Parameters
        ----------
        sam_set : 1D or 2D array_like or dict
            Parameter/sample set for which an enclosing hypercube is
            requested.

        Returns
        -------
        sam_space : 2D :obj:`~numpy.ndarray` object
            The requested hypercube boundaries.
        """
        # If sam_set is a dict, convert it to a NumPy array
        if isinstance(sam_set, dict):
            sam_set = np_array(sdict(sam_set).values()).T

        # Make sure that sam_set is a 2D NumPy array
        sam_set = np_array(sam_set, ndmin=2)

        # Determine the maximum difference between consecutive samples
        # (per parameter, after sorting the sampled values)
        sam_diff = np.apply_along_axis(
            lambda x: np.max(np.diff(np.sort(x))), axis=0, arr=sam_set)

        # Determine the min/max values of all samples
        sam_min = np.min(sam_set, axis=0)
        sam_max = np.max(sam_set, axis=0)

        # Add 3*sam_diff as extra spacing to sam_min and sam_max
        # This reduces the effect of the used sampling method and randomness
        sam_min -= 3*sam_diff
        sam_max += 3*sam_diff

        # Combine sam_min and sam_max to form sam_space
        sam_space = np.stack([sam_min, sam_max], axis=1)

        # Make sure that sam_space is within par_space
        sam_space = np.apply_along_axis(
            lambda x: np.clip(x, *self._par_rng.T), axis=0, arr=sam_space)

        # Return sam_space
        return(sam_space)
    # This function checks if a provided mod_set is valid
    def _check_mod_set(self, mod_set, name):
        """
        Checks validity of provided set of model outputs `mod_set` in this
        :obj:`~ModelLink` instance.

        Parameters
        ----------
        mod_set : 1D or 2D array_like or dict
            Model output (set) to validate in this :obj:`~ModelLink`
            instance.
        name : str
            The name of the model output (set), which is used in the error
            message if the validation fails.

        Returns
        -------
        mod_set : 1D or 2D :obj:`~numpy.ndarray` object
            The provided `mod_set` if the validation was successful. If
            `mod_set` was a dict, it will be converted to a
            :obj:`~numpy.ndarray` object (sorted on :attr:`~data_idx`).
        """
        # Make logger
        logger = getCLogger('CHECK')
        logger.info("Validating provided set of model outputs %r." % (name))

        # If mod_set is a dict, try to convert it to a NumPy array
        # (columns ordered on data_idx so all callers agree on the lay-out)
        if isinstance(mod_set, dict):
            try:
                mod_set = np_array([mod_set[idx] for idx in self._data_idx]).T
            except KeyError as error:
                err_msg = ("Input argument %r is missing data identifier '%r'!"
                           % (name, error.args[0]))
                e13.raise_error(err_msg, KeyError, logger)

        # Make sure that mod_set is a NumPy array
        mod_set = np_array(mod_set)

        # Raise error if mod_set is not 1D or 2D
        if not(mod_set.ndim == 1 or mod_set.ndim == 2):
            err_msg = ("Input argument %r is not one-dimensional or "
                       "two-dimensional!" % (name))
            e13.raise_error(err_msg, e13.ShapeError, logger)

        # Raise error if mod_set does not have n_data data values
        if not(mod_set.shape[-1] == self._n_data):
            err_msg = ("Input argument %r has incorrect number of data values "
                       "(%i != %i)!"
                       % (name, mod_set.shape[-1], self._n_data))
            e13.raise_error(err_msg, e13.ShapeError, logger)

        # Check if mod_set solely consists out of floats
        mod_set = check_vals(mod_set, name, 'float')

        # Log again and return mod_set
        logger.info("Finished validating provided set of model outputs %r."
                    % (name))
        return(mod_set)
    # This function checks if a provided sam_set is valid
    def _check_sam_set(self, sam_set, name):
        """
        Checks validity of provided set of model parameter samples `sam_set`
        in this :obj:`~ModelLink` instance.

        Parameters
        ----------
        sam_set : 1D or 2D array_like or dict
            Parameter/sample set to validate in this :obj:`~ModelLink`
            instance.
        name : str
            The name of the parameter/sample set, which is used in the error
            message if the validation fails.

        Returns
        -------
        sam_set : 1D or 2D :obj:`~numpy.ndarray` object
            The provided `sam_set` if the validation was successful. If
            `sam_set` was a dict, it will be converted to a
            :obj:`~numpy.ndarray` object.
        """
        # Make logger
        logger = getCLogger('CHECK')
        logger.info("Validating provided set of model parameter samples %r."
                    % (name))

        # If sam_set is a dict, convert it to a NumPy array
        if isinstance(sam_set, dict):
            sam_set = np_array(sdict(sam_set).values()).T

        # Make sure that sam_set is a NumPy array
        sam_set = np_array(sam_set)

        # Raise error if sam_set is not 1D or 2D
        if not(sam_set.ndim == 1 or sam_set.ndim == 2):
            err_msg = ("Input argument %r is not one-dimensional or "
                       "two-dimensional!" % (name))
            e13.raise_error(err_msg, e13.ShapeError, logger)

        # Raise error if sam_set does not have n_par parameter values
        if not(sam_set.shape[-1] == self._n_par):
            err_msg = ("Input argument %r has incorrect number of parameters "
                       "(%i != %i)!"
                       % (name, sam_set.shape[-1], self._n_par))
            e13.raise_error(err_msg, e13.ShapeError, logger)

        # Check if sam_set solely consists out of floats
        sam_set = check_vals(sam_set, name, 'float')

        # Check if all samples are within parameter space
        # (work on a 2D view so 1D sets go through the same code path)
        sam_set_2D = np_array(sam_set, ndmin=2)
        rng = self._par_rng
        check = np.apply_along_axis(
            lambda x: ((rng[:, 0] <= x)*(x <= rng[:, 1])).all(), 1, sam_set_2D)

        # If check is not empty (can be indexed), raise error
        try:
            index = np.argwhere(~check)[0]
        except IndexError:
            pass
        else:
            err_msg = ("Input argument '%s%s' is outside parameter space!"
                       % (name, index if sam_set.ndim != 1 else ''))
            e13.raise_error(err_msg, ValueError, logger)

        # Log again and return sam_set
        logger.info("Finished validating provided set of model parameter "
                    "samples %r." % (name))
        return(sam_set)
# This function converts a given sam_set to a sam_dict
def _get_sam_dict(self, sam_set):
"""
Converts a provided set of model parameter samples `sam_set` to a dict
for use in this :obj:`~ModelLink` instance.
This dict can be used as the `par_set` argument in the
:meth:`~call_model` and :meth:`~get_md_var` methods.
Parameters
----------
sam_set : 1D or 2D array_like
Parameter/sample set to convert for this :obj:`~ModelLink`
instance.
Returns
-------
sam_dict : dict of list
Dict of parameter samples.
"""
# Make sure that sam_set is a NumPy array
sam_set = np_array(sam_set)
# Check how many dimensions sam_set has and act accordingly
if(sam_set.ndim == 1):
sam_dict = sdict(zip(self._par_name, sam_set))
else:
sam_dict = sdict(zip(self._par_name, sam_set.T))
# Return sam_dict
return(sam_dict)
    # This function checks if a provided md_var is valid
    def _check_md_var(self, md_var, name):
        """
        Checks validity of provided set of model discrepancy variances
        `md_var` in this :obj:`~ModelLink` instance.

        Parameters
        ----------
        md_var : 1D or 2D array_like or dict
            Model discrepancy variance set to validate in this
            :obj:`~ModelLink` instance.
        name : str
            The name of the model discrepancy set, which is used in the
            error message if the validation fails.

        Returns
        -------
        md_var : 2D :obj:`~numpy.ndarray` object
            The (converted) provided `md_var` if the validation was
            successful. If `md_var` was a dict, it will be converted to a
            :obj:`~numpy.ndarray` object.
        """
        # Make logger
        logger = getCLogger('CHECK')
        logger.info("Validating provided set of model discrepancy variances "
                    "%r." % (name))

        # If md_var is a dict, convert it to a NumPy array
        # (equivalent to list(md_var.values()); insertion order is kept)
        if isinstance(md_var, dict):
            md_var = np_array([md_var[idx] for idx in md_var.keys()])

        # Make sure that md_var is a NumPy array
        md_var = np_array(md_var)

        # Raise error if md_var is not 1D or 2D
        if not(md_var.ndim == 1 or md_var.ndim == 2):
            err_msg = ("Input argument %r is not one-dimensional or "
                       "two-dimensional!" % (name))
            e13.raise_error(err_msg, e13.ShapeError, logger)

        # Check if md_var contains n_data values
        if not(md_var.shape[0] == self._n_data):
            err_msg = ("Received array of model discrepancy variances %r has "
                       "incorrect number of data points (%i != %i)!"
                       % (name, md_var.shape[0], self._n_data))
            raise e13.ShapeError(err_msg)

        # Check if single or dual values were given
        # (single values become a centered [var, var] pair)
        if(md_var.ndim == 1):
            md_var = np_array([md_var]*2).T
        elif(md_var.shape[1] == 2):
            pass
        else:
            err_msg = ("Received array of model discrepancy variances %r has "
                       "incorrect number of values (%i != 2)!"
                       % (name, md_var.shape[1]))
            raise e13.ShapeError(err_msg)

        # Check if all values are non-negative floats
        md_var = check_vals(md_var, 'md_var', 'nneg', 'float')

        # Log again and return md_var
        logger.info("Finished validating provided set of model discrepancy "
                    "variances %r." % (name))
        return(md_var)
    # This function returns the path to a backup file
    # TODO: Should backup file be saved in emulator working directory of PRISM?
    def _get_backup_path(self, emul_i, suffix):
        """
        Returns the absolute path to a backup file made by this
        :obj:`~ModelLink` instance, using the provided `emul_i` and
        `suffix`.

        This method is used by the :meth:`~_make_backup` and
        :meth:`~_read_backup` methods, and should not be called directly.

        Parameters
        ----------
        emul_i : int
            The emulator iteration for which a backup filepath is needed.
        suffix : str or None
            If str, determine path to associated backup file using provided
            `suffix`. If `suffix` is empty, obtain last created backup
            file. If *None*, create a new path to a backup file.

        Returns
        -------
        filepath : str
            Absolute path to requested backup file.
        """
        # Determine the prefix of the backup hdf5-file
        # Files are named "backup_<emul_i>_<name>(<random>).hdf5"
        prefix = "backup_%i_%s(" % (emul_i, self._name)

        # If suffix is None, generate new backup filepath
        if suffix is None:
            # Determine the path of the backup hdf5-file
            # mktemp fills in the random part between prefix and ").hdf5"
            filepath = path.abspath(mktemp(').hdf5', prefix, '.'))

            # Return determined filepath
            return(filepath)

        # If suffix is a string, determine the path
        elif isinstance(suffix, str):
            # If the string is empty, find the last created backup file
            if not suffix:
                # Make list of all valid backup files in current directory
                files = glob("%s/%s*" % (path.abspath('.'), prefix))

                # If files is not empty, return last one created
                # (most recent ctime wins)
                if files:
                    return(max(files, key=path.getctime))

                # Else, raise error
                else:
                    err_msg = ("No backup files can be found in the current "
                               "directory for input argument 'emul_i'!")
                    raise OSError(err_msg)

            # If the string is not empty, check if provided suffix is valid
            else:
                # Obtain full filepath
                filepath = path.abspath(path.join(
                    '.', ''.join([prefix, suffix, ').hdf5'])))

                # If filepath exists, return it
                if path.exists(filepath):
                    return(filepath)

                # If not, raise error
                else:
                    err_msg = ("Input argument 'suffix' does not yield an "
                               "existing path to a backup file (%r)!"
                               % (filepath))
                    raise OSError(err_msg)
# This function makes a backup of args/kwargs to be used during call_model
    def _make_backup(self, *args, **kwargs):
        """
        WARNING: This is an advanced utility method and probably will not work
        unless used properly. Use with caution!
        Creates an HDF5-file backup of the provided `args` and `kwargs` when
        called by the :meth:`~call_model` method or any of its inner functions.
        Additionally, the backup will contain the `emul_i`, `par_set` and
        `data_idx` values that were passed to the :meth:`~call_model` method.
        It also contains the version of *PRISM* that made the backup.
        The backup can be restored using the :meth:`~_read_backup` method.
        If it is detected that this method is used incorrectly, a
        :class:`~prism._internal.RequestWarning` is raised (and the method
        returns) rather than a :class:`~prism._internal.RequestError`, in order
        to not disrupt the call to :meth:`~call_model`.
        Parameters
        ----------
        args : positional arguments
            All positional arguments that must be stored in the backup file.
        kwargs : keyword arguments
            All keyword arguments that must be stored in the backup file.
        Notes
        -----
        The name of the created backup file contains the value of `emul_i`,
        :attr:`~name` and a random string to avoid replacing an already
        existing backup file.
        The saved `emul_i`, `par_set` and `data_idx` are the values these
        variables have locally in the :meth:`~call_model` method at the point
        this method is called. Because of this, making any changes to them may
        cause problems and is therefore heavily discouraged. If changes are
        necessary, it is advised to copy them to a different variable first.
        """
        # Raise warning about this feature being experimental
        warn_msg = ("The 'call_model' backup system is still experimental and "
                    "it may see significant changes or be (re)moved in the "
                    "future!")
        # stacklevel=2 makes the warning point at the caller of this method
        warnings.warn(warn_msg, FeatureWarning, stacklevel=2)
        # Check if any args or kwargs have been provided
        if not args and not kwargs:
            # If not, issue a warning about that and return
            warn_msg = ("No positional or keyword arguments have been "
                        "provided. Backup creation will be skipped!")
            warnings.warn(warn_msg, RequestWarning, stacklevel=2)
            return
        # Obtain the call_model frame
        # Walks up the call stack looking for the frame executing call_model
        caller_frame = e13.get_outer_frame(self.call_model)
        # If caller_frame is None, the call_model frame was not found
        # (i.e., this method was invoked from outside call_model)
        if caller_frame is None:
            # Issue a warning about it and return
            warn_msg = ("This method has been called from outside the "
                        "'call_model' method. Backup creation will be "
                        "skipped!")
            warnings.warn(warn_msg, RequestWarning, stacklevel=2)
            return
        # Obtain the locals of the call_model frame
        loc = caller_frame.f_locals
        # Extract local emul_i, par_set and data_idx
        # Unless call_model was called using args, below will extract correctly
        # These one-liners are the equivalent of
        # try:
        #     emul_i = loc['emul_i']
        # except KeyError:
        #     try:
        #         emul_i = loc['kwargs']['emul_i']
        #     except KeyError:
        #         emul_i = None
        emul_i = loc.get('emul_i', loc.get('kwargs', {}).get('emul_i'))
        par_set = loc.get('par_set', loc.get('kwargs', {}).get('par_set'))
        data_idx = loc.get('data_idx', loc.get('kwargs', {}).get('data_idx'))
        # If one of these is None, then it is not correctly locally available
        # This can happen if args are used instead of kwargs for call_model
        # PRISM code always uses kwargs and never causes this problem
        if None in (emul_i, par_set, data_idx):
            warn_msg = ("Required local variables 'emul_i', 'par_set' and "
                        "'data_idx' are not correctly available. Backup "
                        "creation will be skipped!")
            warnings.warn(warn_msg, RequestWarning, stacklevel=2)
            return
        # Obtain path to backup file
        # Passing suffix=None yields a fresh, unique backup filepath
        filepath = self._get_backup_path(emul_i, None)
        # Save emul_i, par_set, data_idx, args and kwargs to hdf5
        with h5py.File(filepath, 'w') as file:
            file.attrs['emul_i'] = emul_i
            file.attrs['prism_version'] = __version__
            # par_set is converted to a plain dict before dumping —
            # presumably hickle cannot serialize its original type directly
            hickle.dump(dict(par_set), file, path='/par_set')
            hickle.dump(data_idx, file, path='/data_idx')
            hickle.dump(args, file, path='/args')
            hickle.dump(kwargs, file, path='/kwargs')
# This function reads in a backup made by _make_backup
# TODO: Allow for absolute path to backup file to be given?
# TODO: Convert to static method to read backups without subclass object?
def _read_backup(self, emul_i, *, suffix=None):
"""
Reads in a backup HDF5-file created by the :meth:`~_make_backup`
method, using the provided `emul_i` and the value of :attr:`~name`.
Parameters
----------
emul_i : int
The emulator iteration that was provided to the :meth:`~call_model`
method when the backup was made.
Optional
--------
suffix : str or None. Default: None
The suffix of the backup file (everything between parentheses) that
needs to be read. If *None* or empty, the last created backup will
be read.
Returns
-------
filename : str
The absolute path to the backup file that has been read.
data : dict with keys `('emul_i', 'prism_version', 'par_set',` \
`'data_idx', 'args', 'kwargs')`
A dict containing the data that was provided to the
:meth:`~_make_backup` method.
"""
# Raise warning about this feature being experimental
warn_msg = ("The 'call_model' backup system is still experimental and "
"it may see significant changes or be (re)moved in the "
"future!")
warnings.warn(warn_msg, FeatureWarning, stacklevel=2)
# Check if provided emul_i is an integer
emul_i = check_vals(emul_i, 'emul_i', 'int', 'nneg')
# Check if provided suffix is None or a string
if suffix is None:
suffix = ''
else:
suffix = check_vals(suffix, 'suffix', 'str')
# Obtain name of backup file
filepath = self._get_backup_path(emul_i, suffix)
# Initialize empty data dict
data = sdict()
# Read emul_i, par_set, data_idx, args and kwargs from hdf5
with h5py.File(filepath, 'r') as file:
data['emul_i'] = file.attrs['emul_i']
data['prism_version'] = file.attrs['prism_version']
data['par_set'] = sdict(hickle.load(file, path='/par_set'))
data['data_idx'] = hickle.load(file, path='/data_idx')
data['args'] = hickle.load(file, path='/args')
data['kwargs'] = hickle.load(file, path='/kwargs')
# Return data
return(filepath, data)
@property
def _default_model_parameters(self):
"""
dict: The default model parameters to use for every instance of this
:class:`~ModelLink` subclass.
"""
return(sdict())
def get_default_model_parameters(self):
"""
Returns the default model parameters to use for every instance of this
:class:`~ModelLink` subclass. By default, returns
:attr:`~ModelLink._default_model_parameters`.
"""
return(self._default_model_parameters)
def __set_model_parameters(self, add_model_parameters):
"""
Generates the model parameter properties from the default model
parameters and the additional input argument `add_model_parameters`.
Parameters
----------
add_model_parameters : array_like, dict, str or None
Anything that can be converted to a dict that provides non-default
model parameters information or *None* if only default information
is used from :meth:`~ModelLink.get_default_model_parameters`.
Generates
---------
n_par : int
Number of model parameters.
par_name : list
List with model parameter names.
par_rng : :obj:`~numpy.ndarray` object
Array containing the lower and upper values of the model
parameters.
par_est : list
List containing user-defined estimated values of the model
parameters.
Contains *None* in places where estimates were not provided.
"""
# Obtain default model parameters
model_parameters =\
convert_parameters(self.get_default_model_parameters())
# If additional model parameters information is given, add it
if add_model_parameters is not None:
model_parameters.update(convert_parameters(add_model_parameters))
# Save number of model parameters
n_par = len(model_parameters.keys())
if(n_par == 1):
raise e13.InputError("Number of model parameters must be at least "
"2!")
else:
self._n_par = check_vals(n_par, 'n_par', 'pos')
# Create empty parameter name, ranges and estimate lists/arrays
self._par_name = []
self._par_rng = np.zeros([self._n_par, 2])
self._par_rng[:, 1] = 1
self._par_est = []
# Save model parameters as class properties
for i, (name, (*rng, est)) in enumerate(model_parameters.items()):
# Save parameter name, range and est
self._par_name.append(name)
self._par_rng[i] = rng
self._par_est.append(est)
@property
def _default_model_data(self):
"""
dict: The default model data to use for every instance of this
:class:`~ModelLink` subclass.
"""
return(dict())
def get_default_model_data(self):
"""
Returns the default model data to use for every instance of this
:class:`~ModelLink` subclass. By default, returns
:attr:`~ModelLink._default_model_data`.
"""
return(self._default_model_data)
def __set_model_data(self, add_model_data):
"""
Generates the model data properties from the default model data and the
additional input argument `add_model_data`.
Parameters
---------
add_model_data : array_like, dict, str or None
Anything that can be converted to a dict that provides non-default
model data information or *None* if only default data is used from
:meth:`~ModelLink.get_default_model_data`.
Generates
---------
n_data : int
Number of provided data points.
data_val : list
List with values of provided data points.
data_err : list of lists
List with upper and lower :math:`1\\sigma`-confidence levels of
provided data points.
data_spc : list
List with types of value space ({'lin', 'log', 'ln'}) of provided
data points.
data_idx : list of tuples
List with user-defined data point identifiers.
"""
# Obtain default model data
model_data = convert_data(self.get_default_model_data())
# If additional model data information is given, add it
if add_model_data is not None:
model_data.update(convert_data(add_model_data))
# Determine the number of data points
self._n_data = check_vals(len(model_data), 'n_data', 'pos')
# Create empty data value, error, space and identifier lists
self._data_val = []
self._data_err = []
self._data_spc = []
self._data_idx = []
# Save model data as class properties
for idx, (val, *err, spc) in model_data.items():
# Save data value, errors, space and identifier
self._data_val.append(val)
self._data_err.append(err)
self._data_spc.append(spc)
self._data_idx.append(idx)
# %% ABSTRACT USER METHODS
    @abc.abstractmethod
    # The decorator below substitutes %(emul_i)s in the docstring at import
    @e13.docstring_substitute(emul_i=std_emul_i_doc)
    def call_model(self, emul_i, par_set, data_idx):
        """
        Calls the model wrapped in this :class:`~ModelLink` subclass at
        emulator iteration `emul_i` for model parameter values `par_set` and
        returns the data points corresponding to `data_idx`.
        This method is called with solely keyword arguments.
        This is an abstract method and must be overridden by the
        :class:`~ModelLink` subclass.
        Parameters
        ----------
        %(emul_i)s
        par_set : dict of :class:`~numpy.float64`
            Dict containing the values for all model parameters corresponding
            to the requested model realization(s). If model is single-called,
            dict item is formatted as ``{par_name: par_val}``. If multi-called,
            it is formatted as ``{par_name: [par_val_1, par_val_2, ...,
            par_val_n]}``.
        data_idx : list of tuples
            List containing the user-defined data point identifiers
            corresponding to the requested data points.
        Returns
        -------
        data_val : 1D or 2D array_like or dict
            Array containing the data values corresponding to the requested
            data points generated by the requested model realization(s). If
            model is multi-called, `data_val` is of shape ``(n_sam, n_data)``.
            If dict, it has the identifiers in `data_idx` as its keys with
            either scalars or 1D array_likes as its values.
        Note
        ----
        If this model is multi-called, then the parameter sets in the provided
        `par_set` dict will be sorted in order of parameter name (e.g., sort on
        first parameter first, then on second parameter, etc.).
        """
        # Raise NotImplementedError if only super() was called
        # (i.e., the subclass override delegated here instead of implementing)
        raise NotImplementedError("This method must be user-written in the "
                                  "ModelLink subclass!")
@abc.abstractmethod
@e13.docstring_substitute(emul_i=std_emul_i_doc)
def get_md_var(self, emul_i, par_set, data_idx):
"""
Calculates the linear model discrepancy variance at a given emulator
iteration `emul_i` for model parameter values `par_set` and given data
points `data_idx` for the model wrapped in this :class:`~ModelLink`
subclass.
This method is always single-called by one MPI rank with solely keyword
arguments.
This is an abstract method and must be overridden by the
:class:`~ModelLink` subclass.
Parameters
----------
%(emul_i)s
par_set : dict of :class:`~numpy.float64`
Dict containing the values for all model parameters corresponding
to the requested model realization.
data_idx : list of tuples
List containing the user-defined data point identifiers
corresponding to the requested data points.
Returns
-------
md_var : 1D or 2D array_like
Array containing the linear model discrepancy variance values
corresponding to the requested data points. If 1D array_like, data
is assumed to have a centered one sigma confidence interval. If 2D
array_like, the values determine the upper and lower variances and
the array is of shape ``(n_data, 2)``.
If dict, it has the identifiers in `data_idx` as its keys with
either scalars or 1D array_likes of length 2 as its values.
Notes
-----
The returned model discrepancy variance values must be of linear form,
even for those data values that are returned in logarithmic form by the
:meth:`~call_model` method. If not, the possibility exists that the
emulation process will not converge properly.
"""
# Raise NotImplementedError if only super() was called
raise NotImplementedError("This method must be user-written in the "
"ModelLink subclass!")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
17633,
11280,
198,
2559,
28,
198,
15946,
1460,
262,
6770,
286,
262,
1058,
4871,
25,
63,
93,
17633,
11280,
63,
12531,
2779,
1398,
13,
198,
198,
37811,
... | 2.377373 | 20,330 |
import numpy as np
from pysabr import Hagan2002NormalSABR
import logging
| [
11748,
299,
32152,
355,
45941,
198,
6738,
279,
893,
397,
81,
1330,
367,
7329,
16942,
26447,
4090,
11473,
198,
11748,
18931,
628
] | 3.363636 | 22 |
"""The search command."""
import requests, json
from termcolor import colored
from unidecode import unidecode
from newsapi.user_settings import UserSettings
from .base import Base
| [
37811,
464,
2989,
3141,
526,
15931,
198,
198,
11748,
7007,
11,
33918,
198,
6738,
3381,
8043,
1330,
16396,
198,
6738,
555,
485,
8189,
1330,
555,
485,
8189,
198,
6738,
1705,
15042,
13,
7220,
62,
33692,
1330,
11787,
26232,
198,
6738,
764,
... | 3.509091 | 55 |
import unittest
from metatron import Metatron, add_schema_spec
from metatron.schemas import SCHEMAS
if __name__ == '__main__':
unittest.main()
| [
11748,
555,
715,
395,
198,
6738,
1138,
23484,
1330,
38894,
11,
751,
62,
15952,
2611,
62,
16684,
198,
6738,
1138,
23484,
13,
1416,
4411,
292,
1330,
22374,
3620,
1921,
628,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10... | 2.849057 | 53 |
import taskloaf as tsk
import logging
logger = logging.getLogger(__name__)
# An example of a taskloaf "daemon"? Could be cool!
| [
11748,
4876,
5439,
1878,
355,
256,
8135,
198,
198,
11748,
18931,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
198,
198,
2,
1052,
1672,
286,
257,
4876,
5439,
1878,
366,
6814,
7966,
13984,
10347,
307,
... | 2.891304 | 46 |
#
# PySNMP MIB module BIANCA-BRICK-CAPI-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/BIANCA-BRICK-CAPI-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:21:03 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
BitValue, = mibBuilder.importSymbols("BIANCA-BRICK-PPP-MIB", "BitValue")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Unsigned32, Integer32, MibIdentifier, Bits, Counter32, Counter64, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, IpAddress, iso, NotificationType, ObjectIdentity, ModuleIdentity, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Integer32", "MibIdentifier", "Bits", "Counter32", "Counter64", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "IpAddress", "iso", "NotificationType", "ObjectIdentity", "ModuleIdentity", "TimeTicks")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
org = MibIdentifier((1, 3))
dod = MibIdentifier((1, 3, 6))
internet = MibIdentifier((1, 3, 6, 1))
private = MibIdentifier((1, 3, 6, 1, 4))
enterprises = MibIdentifier((1, 3, 6, 1, 4, 1))
bintec = MibIdentifier((1, 3, 6, 1, 4, 1, 272))
bibo = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4))
capi = MibIdentifier((1, 3, 6, 1, 4, 1, 272, 4, 7))
capiApplTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 7, 1), )
if mibBuilder.loadTexts: capiApplTable.setStatus('mandatory')
capiApplEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 7, 1, 1), ).setIndexNames((0, "BIANCA-BRICK-CAPI-MIB", "capiApplNumber"))
if mibBuilder.loadTexts: capiApplEntry.setStatus('mandatory')
capiApplNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 1, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiApplNumber.setStatus('mandatory')
capiApplVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("capi11", 1), ("capi20", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiApplVersion.setStatus('mandatory')
capiApplByteOrder = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("little-endian", 1), ("big-endian", 2), ("undef", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiApplByteOrder.setStatus('mandatory')
capiApplRegLevel3Cnt = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiApplRegLevel3Cnt.setStatus('mandatory')
capiApplRegMsgCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiApplRegMsgCnt.setStatus('mandatory')
capiApplRegDblkCnt = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiApplRegDblkCnt.setStatus('mandatory')
capiApplRegDblkSize = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiApplRegDblkSize.setStatus('mandatory')
capiApplInfoStr = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiApplInfoStr.setStatus('mandatory')
capiListenTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 7, 2), )
if mibBuilder.loadTexts: capiListenTable.setStatus('mandatory')
capiListenEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 7, 2, 1), ).setIndexNames((0, "BIANCA-BRICK-CAPI-MIB", "capiListenApplication"))
if mibBuilder.loadTexts: capiListenEntry.setStatus('mandatory')
capiListenApplication = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiListenApplication.setStatus('mandatory')
capiListenController = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 2, 1, 2), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiListenController.setStatus('mandatory')
capiListenServiceMask = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 2, 1, 3), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiListenServiceMask.setStatus('mandatory')
capiListenEazMask = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 2, 1, 4), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiListenEazMask.setStatus('mandatory')
capiListenInfoMask = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 2, 1, 5), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiListenInfoMask.setStatus('mandatory')
capiListenCipMask = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 2, 1, 6), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiListenCipMask.setStatus('mandatory')
capiListenCipMask2 = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 2, 1, 7), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiListenCipMask2.setStatus('mandatory')
capiPlciTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 7, 3), )
if mibBuilder.loadTexts: capiPlciTable.setStatus('mandatory')
capiPlciEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1), ).setIndexNames((0, "BIANCA-BRICK-CAPI-MIB", "capiPlciNumber"))
if mibBuilder.loadTexts: capiPlciEntry.setStatus('mandatory')
capiPlciNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 1), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciNumber.setStatus('mandatory')
capiPlciApplication = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciApplication.setStatus('mandatory')
capiPlciController = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 3), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciController.setStatus('mandatory')
capiPlciState = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("p-0", 1), ("p-1", 2), ("p-2", 3), ("p-3", 4), ("p-4", 5), ("p-5", 6), ("p-6", 7), ("p-7", 8), ("p-act", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciState.setStatus('mandatory')
capiPlciSelectB2Proto = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 241, 242))).clone(namedValues=NamedValues(("undef", 1), ("x75", 2), ("hdlccrc", 3), ("trans", 4), ("sdlc", 5), ("x75btx", 6), ("fax", 7), ("lapd", 8), ("v110trans", 9), ("v110sdlc", 10), ("v110x75", 11), ("txonly", 12), ("modem", 241), ("v110sync", 242)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciSelectB2Proto.setStatus('mandatory')
capiPlciSelectB2Dlpd = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 6), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciSelectB2Dlpd.setStatus('mandatory')
capiPlciSelectB3Proto = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("undef", 1), ("t70nl", 2), ("iso8208", 3), ("t90", 4), ("trans", 5), ("t30", 6)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciSelectB3Proto.setStatus('mandatory')
capiPlciSelectB3Ncpd = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 8), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciSelectB3Ncpd.setStatus('mandatory')
capiPlciB1Proto = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11))).clone(namedValues=NamedValues(("undef", 1), ("hdlc", 2), ("trans", 3), ("v110trans", 4), ("v110hdlc", 5), ("faxg3", 6), ("hdlcinv", 7), ("hdlc56", 8), ("modemneg", 9), ("modemasync", 10), ("modemsync", 11)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciB1Proto.setStatus('mandatory')
capiPlciB1Config = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 10), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciB1Config.setStatus('mandatory')
capiPlciB2Proto = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("undef", 1), ("x75", 2), ("trans", 3), ("sdlc", 4), ("lapd", 5), ("t30", 6), ("ppp", 7), ("transerr", 8), ("modem", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciB2Proto.setStatus('mandatory')
capiPlciB2Config = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 12), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciB2Config.setStatus('mandatory')
capiPlciB3Proto = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 9))).clone(namedValues=NamedValues(("undef", 1), ("trans", 2), ("t90", 3), ("iso8208", 4), ("x25dce", 5), ("t30", 6), ("t30ext", 7), ("modem", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciB3Proto.setStatus('mandatory')
capiPlciB3Config = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 14), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciB3Config.setStatus('mandatory')
capiPlciCipValue = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciCipValue.setStatus('mandatory')
capiPlciInfoMask = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 3, 1, 16), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiPlciInfoMask.setStatus('mandatory')
capiNcciTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 7, 4), )
if mibBuilder.loadTexts: capiNcciTable.setStatus('mandatory')
capiNcciEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 7, 4, 1), ).setIndexNames((0, "BIANCA-BRICK-CAPI-MIB", "capiNcciNumber"))
if mibBuilder.loadTexts: capiNcciEntry.setStatus('mandatory')
capiNcciNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 4, 1, 1), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiNcciNumber.setStatus('mandatory')
capiNcciApplication = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 4, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiNcciApplication.setStatus('mandatory')
capiNcciPlci = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 4, 1, 3), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiNcciPlci.setStatus('mandatory')
capiNcciState = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 4, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("n-0", 1), ("n-1", 2), ("n-2", 3), ("n-3", 4), ("n-4", 5), ("n-5", 6), ("n-act", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiNcciState.setStatus('mandatory')
capiInfoTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 7, 5), )
if mibBuilder.loadTexts: capiInfoTable.setStatus('mandatory')
capiInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 7, 5, 1), ).setIndexNames((0, "BIANCA-BRICK-CAPI-MIB", "capiInfoApplication"))
if mibBuilder.loadTexts: capiInfoEntry.setStatus('mandatory')
capiInfoApplication = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 5, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiInfoApplication.setStatus('mandatory')
capiInfoPlci = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 5, 1, 2), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiInfoPlci.setStatus('mandatory')
capiInfoNcci = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 5, 1, 3), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiInfoNcci.setStatus('mandatory')
capiInfoC1Command = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 5, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 64, 128, 129, 130, 131, 132, 133, 134, 135))).clone(namedValues=NamedValues(("resetb3", 1), ("connect", 2), ("conact", 3), ("disc", 4), ("listen", 5), ("param", 6), ("info", 7), ("data", 8), ("conninfo", 9), ("dtmf", 10), ("selb2", 64), ("selb3", 128), ("listenb3", 129), ("conb3", 130), ("conb3act", 131), ("discb3", 132), ("b3param", 133), ("datab3", 134), ("handset", 135)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiInfoC1Command.setStatus('mandatory')
capiInfoC2Command = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 5, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 8, 65, 128, 130, 131, 132, 134, 135, 136, 255))).clone(namedValues=NamedValues(("alert", 1), ("connect", 2), ("conact", 3), ("disc", 4), ("listen", 5), ("info", 8), ("selectb", 65), ("facility", 128), ("conb3", 130), ("conb3act", 131), ("discb3", 132), ("datab3", 134), ("resetb3", 135), ("conb3t90", 136), ("manufact", 255)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiInfoC2Command.setStatus('mandatory')
capiInfoSubCommand = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 5, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("req", 1), ("conf", 2), ("ind", 3), ("resp", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiInfoSubCommand.setStatus('mandatory')
capiInfoNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 5, 1, 7), HexValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiInfoNumber.setStatus('mandatory')
capiConfigTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 7, 6), )
if mibBuilder.loadTexts: capiConfigTable.setStatus('mandatory')
capiConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1), ).setIndexNames((0, "BIANCA-BRICK-CAPI-MIB", "capiConfigStkNumber"))
if mibBuilder.loadTexts: capiConfigEntry.setStatus('mandatory')
capiConfigStkNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: capiConfigStkNumber.setStatus('mandatory')
capiConfigFaxG3RcvSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("r4800", 1), ("r7200", 2), ("r9600", 3), ("r14400", 4), ("maximum", 5), ("not-available", 6), ("delete", 7)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigFaxG3RcvSpeed.setStatus('mandatory')
capiConfigFaxG3ECM = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("not-available", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigFaxG3ECM.setStatus('mandatory')
capiConfigFaxG3Header = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("logo-header", 1), ("no-logo", 2), ("no-header", 3), ("not-available", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigFaxG3Header.setStatus('mandatory')
capiConfigVoiceCoding = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("reverse", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigVoiceCoding.setStatus('mandatory')
capiConfigSendAlerting = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("never", 1), ("ever", 2), ("voice-only", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigSendAlerting.setStatus('mandatory')
capiConfigV42bis = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("on", 1), ("off", 2), ("not-available", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigV42bis.setStatus('mandatory')
capiConfigModemDefault = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("modem-profile-1", 1), ("modem-profile-2", 2), ("modem-profile-3", 3), ("modem-profile-4", 4), ("modem-profile-5", 5), ("modem-profile-6", 6), ("modem-profile-7", 7), ("modem-profile-8", 8)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigModemDefault.setStatus('mandatory')
capiConfigFaxModulation = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 5, 6))).clone(namedValues=NamedValues(("v17", 1), ("v33", 2), ("v29", 3), ("v17s", 5), ("v33s", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigFaxModulation.setStatus('mandatory')
capiConfigFax12000 = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("on", 1), ("off", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigFax12000.setStatus('mandatory')
capiConfigFaxTXLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 6, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))).clone(namedValues=NamedValues(("db0", 1), ("db3", 2), ("db6", 3), ("db9", 4), ("db12", 5), ("db15", 6)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiConfigFaxTXLevel.setStatus('mandatory')
capiMultiControllerTable = MibTable((1, 3, 6, 1, 4, 1, 272, 4, 7, 7), )
if mibBuilder.loadTexts: capiMultiControllerTable.setStatus('mandatory')
capiMultiControllerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 272, 4, 7, 7, 1), ).setIndexNames((0, "BIANCA-BRICK-CAPI-MIB", "capiControllerNumber"))
if mibBuilder.loadTexts: capiMultiControllerEntry.setStatus('mandatory')
capiControllerNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 31))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiControllerNumber.setStatus('mandatory')
capiControllerStkMask = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 7, 1, 2), BitValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiControllerStkMask.setStatus('mandatory')
capiControllerVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 272, 4, 7, 7, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("capi11", 1), ("capi20", 2), ("delete", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: capiControllerVersion.setStatus('mandatory')
mibBuilder.exportSymbols("BIANCA-BRICK-CAPI-MIB", capiListenInfoMask=capiListenInfoMask, capiNcciEntry=capiNcciEntry, enterprises=enterprises, capiPlciB3Proto=capiPlciB3Proto, dod=dod, capiListenApplication=capiListenApplication, capiPlciInfoMask=capiPlciInfoMask, capiInfoC2Command=capiInfoC2Command, capiMultiControllerTable=capiMultiControllerTable, capiInfoTable=capiInfoTable, capiListenCipMask=capiListenCipMask, capiControllerStkMask=capiControllerStkMask, capiMultiControllerEntry=capiMultiControllerEntry, capiListenCipMask2=capiListenCipMask2, capiPlciNumber=capiPlciNumber, capiListenServiceMask=capiListenServiceMask, capiPlciB1Proto=capiPlciB1Proto, capiNcciPlci=capiNcciPlci, bintec=bintec, capiPlciApplication=capiPlciApplication, capiApplTable=capiApplTable, capiNcciNumber=capiNcciNumber, capiApplInfoStr=capiApplInfoStr, capiInfoC1Command=capiInfoC1Command, capiInfoApplication=capiInfoApplication, capiConfigEntry=capiConfigEntry, capiControllerNumber=capiControllerNumber, capiApplRegDblkSize=capiApplRegDblkSize, capiApplEntry=capiApplEntry, capiInfoNcci=capiInfoNcci, capiInfoSubCommand=capiInfoSubCommand, capiListenEntry=capiListenEntry, capiConfigVoiceCoding=capiConfigVoiceCoding, capiInfoNumber=capiInfoNumber, HexValue=HexValue, capiConfigTable=capiConfigTable, capiApplRegMsgCnt=capiApplRegMsgCnt, capiApplRegLevel3Cnt=capiApplRegLevel3Cnt, capiConfigFaxModulation=capiConfigFaxModulation, capiListenController=capiListenController, capiApplVersion=capiApplVersion, capiInfoEntry=capiInfoEntry, capiPlciState=capiPlciState, capiNcciTable=capiNcciTable, capiPlciTable=capiPlciTable, capiPlciSelectB3Ncpd=capiPlciSelectB3Ncpd, internet=internet, capiInfoPlci=capiInfoPlci, capiApplByteOrder=capiApplByteOrder, bibo=bibo, capiConfigFaxG3Header=capiConfigFaxG3Header, capiApplRegDblkCnt=capiApplRegDblkCnt, capiConfigStkNumber=capiConfigStkNumber, capiListenEazMask=capiListenEazMask, capiListenTable=capiListenTable, capiPlciEntry=capiPlciEntry, 
capiPlciB3Config=capiPlciB3Config, capiPlciSelectB2Proto=capiPlciSelectB2Proto, capiConfigModemDefault=capiConfigModemDefault, capiControllerVersion=capiControllerVersion, capiPlciSelectB2Dlpd=capiPlciSelectB2Dlpd, capiConfigSendAlerting=capiConfigSendAlerting, capiNcciApplication=capiNcciApplication, capi=capi, capiPlciB2Proto=capiPlciB2Proto, capiPlciSelectB3Proto=capiPlciSelectB3Proto, capiPlciB1Config=capiPlciB1Config, capiApplNumber=capiApplNumber, capiConfigFaxG3RcvSpeed=capiConfigFaxG3RcvSpeed, capiPlciB2Config=capiPlciB2Config, capiNcciState=capiNcciState, private=private, capiConfigFaxTXLevel=capiConfigFaxTXLevel, capiPlciCipValue=capiPlciCipValue, capiConfigV42bis=capiConfigV42bis, capiConfigFax12000=capiConfigFax12000, capiPlciController=capiPlciController, org=org, capiConfigFaxG3ECM=capiConfigFaxG3ECM)
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
347,
16868,
8141,
12,
11473,
11860,
12,
34,
17614,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
1... | 2.538787 | 8,740 |
"""Current-flow betweenness centrality measures."""
import networkx as nx
from networkx.algorithms.centrality.flow_matrix import (
CGInverseLaplacian,
flow_matrix_row,
FullInverseLaplacian,
SuperLUInverseLaplacian,
)
from networkx.utils import (
not_implemented_for,
reverse_cuthill_mckee_ordering,
py_random_state,
)
__all__ = [
"current_flow_betweenness_centrality",
"approximate_current_flow_betweenness_centrality",
"edge_current_flow_betweenness_centrality",
]
@py_random_state(7)
@not_implemented_for("directed")
def approximate_current_flow_betweenness_centrality(
G,
normalized=True,
weight=None,
dtype=float,
solver="full",
epsilon=0.5,
kmax=10000,
seed=None,
):
r"""Compute the approximate current-flow betweenness centrality for nodes.
Approximates the current-flow betweenness centrality within absolute
error of epsilon with high probability [1]_.
Parameters
----------
G : graph
A NetworkX graph
normalized : bool, optional (default=True)
If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
n is the number of nodes in G.
weight : string or None, optional (default=None)
Key for edge data used as the edge weight.
If None, then use 1 as each edge weight.
The weight reflects the capacity or the strength of the
edge.
dtype : data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver : string (default='full')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
epsilon: float
Absolute error tolerance.
kmax: int
Maximum number of sample node pairs to use for approximation.
seed : integer, random_state, or None (default)
Indicator of random number generation state.
See :ref:`Randomness<randomness>`.
Returns
-------
nodes : dictionary
Dictionary of nodes with betweenness centrality as the value.
See Also
--------
current_flow_betweenness_centrality
Notes
-----
The running time is $O((1/\epsilon^2)m{\sqrt k} \log n)$
and the space required is $O(m)$ for $n$ nodes and $m$ edges.
If the edges have a 'weight' attribute they will be used as
weights in this algorithm. Unspecified weights are set to 1.
References
----------
.. [1] Ulrik Brandes and Daniel Fleischer:
Centrality Measures Based on Current Flow.
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
https://doi.org/10.1007/978-3-540-31856-9_44
"""
import numpy as np
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
solvername = {
"full": FullInverseLaplacian,
"lu": SuperLUInverseLaplacian,
"cg": CGInverseLaplacian,
}
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
L = nx.laplacian_matrix(H, nodelist=range(n), weight=weight).asformat("csc")
L = L.astype(dtype)
C = solvername[solver](L, dtype=dtype) # initialize solver
betweenness = dict.fromkeys(H, 0.0)
nb = (n - 1.0) * (n - 2.0) # normalization factor
cstar = n * (n - 1) / nb
l = 1 # parameter in approximation, adjustable
k = l * int(np.ceil((cstar / epsilon) ** 2 * np.log(n)))
if k > kmax:
msg = f"Number random pairs k>kmax ({k}>{kmax}) "
raise nx.NetworkXError(msg, "Increase kmax or epsilon")
cstar2k = cstar / (2 * k)
for i in range(k):
s, t = seed.sample(range(n), 2)
b = np.zeros(n, dtype=dtype)
b[s] = 1
b[t] = -1
p = C.solve(b)
for v in H:
if v == s or v == t:
continue
for nbr in H[v]:
w = H[v][nbr].get(weight, 1.0)
betweenness[v] += w * np.abs(p[v] - p[nbr]) * cstar2k
if normalized:
factor = 1.0
else:
factor = nb / 2.0
# remap to original node names and "unnormalize" if required
return {ordering[k]: float(v * factor) for k, v in betweenness.items()}
@not_implemented_for("directed")
def current_flow_betweenness_centrality(
G, normalized=True, weight=None, dtype=float, solver="full"
):
r"""Compute current-flow betweenness centrality for nodes.
Current-flow betweenness centrality uses an electrical current
model for information spreading in contrast to betweenness
centrality which uses shortest paths.
Current-flow betweenness centrality is also known as
random-walk betweenness centrality [2]_.
Parameters
----------
G : graph
A NetworkX graph
normalized : bool, optional (default=True)
If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
n is the number of nodes in G.
weight : string or None, optional (default=None)
Key for edge data used as the edge weight.
If None, then use 1 as each edge weight.
The weight reflects the capacity or the strength of the
edge.
dtype : data type (float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver : string (default='full')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of nodes with betweenness centrality as the value.
See Also
--------
approximate_current_flow_betweenness_centrality
betweenness_centrality
edge_betweenness_centrality
edge_current_flow_betweenness_centrality
Notes
-----
Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
time [1]_, where $I(n-1)$ is the time needed to compute the
inverse Laplacian. For a full matrix this is $O(n^3)$ but using
sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
Laplacian matrix condition number.
The space required is $O(nw)$ where $w$ is the width of the sparse
Laplacian matrix. Worse case is $w=n$ for $O(n^2)$.
If the edges have a 'weight' attribute they will be used as
weights in this algorithm. Unspecified weights are set to 1.
References
----------
.. [1] Centrality Measures Based on Current Flow.
Ulrik Brandes and Daniel Fleischer,
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
https://doi.org/10.1007/978-3-540-31856-9_44
.. [2] A measure of betweenness centrality based on random walks,
M. E. J. Newman, Social Networks 27, 39-54 (2005).
"""
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
betweenness = dict.fromkeys(H, 0.0) # b[v]=0 for v in H
for row, (s, t) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
pos = dict(zip(row.argsort()[::-1], range(n)))
for i in range(n):
betweenness[s] += (i - pos[i]) * row[i]
betweenness[t] += (n - i - 1 - pos[i]) * row[i]
if normalized:
nb = (n - 1.0) * (n - 2.0) # normalization factor
else:
nb = 2.0
for v in H:
betweenness[v] = float((betweenness[v] - v) * 2.0 / nb)
return {ordering[k]: v for k, v in betweenness.items()}
@not_implemented_for("directed")
def edge_current_flow_betweenness_centrality(
G, normalized=True, weight=None, dtype=float, solver="full"
):
r"""Compute current-flow betweenness centrality for edges.
Current-flow betweenness centrality uses an electrical current
model for information spreading in contrast to betweenness
centrality which uses shortest paths.
Current-flow betweenness centrality is also known as
random-walk betweenness centrality [2]_.
Parameters
----------
G : graph
A NetworkX graph
normalized : bool, optional (default=True)
If True the betweenness values are normalized by 2/[(n-1)(n-2)] where
n is the number of nodes in G.
weight : string or None, optional (default=None)
Key for edge data used as the edge weight.
If None, then use 1 as each edge weight.
The weight reflects the capacity or the strength of the
edge.
dtype : data type (default=float)
Default data type for internal matrices.
Set to np.float32 for lower memory consumption.
solver : string (default='full')
Type of linear solver to use for computing the flow matrix.
Options are "full" (uses most memory), "lu" (recommended), and
"cg" (uses least memory).
Returns
-------
nodes : dictionary
Dictionary of edge tuples with betweenness centrality as the value.
Raises
------
NetworkXError
The algorithm does not support DiGraphs.
If the input graph is an instance of DiGraph class, NetworkXError
is raised.
See Also
--------
betweenness_centrality
edge_betweenness_centrality
current_flow_betweenness_centrality
Notes
-----
Current-flow betweenness can be computed in $O(I(n-1)+mn \log n)$
time [1]_, where $I(n-1)$ is the time needed to compute the
inverse Laplacian. For a full matrix this is $O(n^3)$ but using
sparse methods you can achieve $O(nm{\sqrt k})$ where $k$ is the
Laplacian matrix condition number.
The space required is $O(nw)$ where $w$ is the width of the sparse
Laplacian matrix. Worse case is $w=n$ for $O(n^2)$.
If the edges have a 'weight' attribute they will be used as
weights in this algorithm. Unspecified weights are set to 1.
References
----------
.. [1] Centrality Measures Based on Current Flow.
Ulrik Brandes and Daniel Fleischer,
Proc. 22nd Symp. Theoretical Aspects of Computer Science (STACS '05).
LNCS 3404, pp. 533-544. Springer-Verlag, 2005.
https://doi.org/10.1007/978-3-540-31856-9_44
.. [2] A measure of betweenness centrality based on random walks,
M. E. J. Newman, Social Networks 27, 39-54 (2005).
"""
from networkx.utils import reverse_cuthill_mckee_ordering
if not nx.is_connected(G):
raise nx.NetworkXError("Graph not connected.")
n = G.number_of_nodes()
ordering = list(reverse_cuthill_mckee_ordering(G))
# make a copy with integer labels according to rcm ordering
# this could be done without a copy if we really wanted to
H = nx.relabel_nodes(G, dict(zip(ordering, range(n))))
edges = (tuple(sorted((u, v))) for u, v in H.edges())
betweenness = dict.fromkeys(edges, 0.0)
if normalized:
nb = (n - 1.0) * (n - 2.0) # normalization factor
else:
nb = 2.0
for row, (e) in flow_matrix_row(H, weight=weight, dtype=dtype, solver=solver):
pos = dict(zip(row.argsort()[::-1], range(1, n + 1)))
for i in range(n):
betweenness[e] += (i + 1 - pos[i]) * row[i]
betweenness[e] += (n - i - pos[i]) * row[i]
betweenness[e] /= nb
return {(ordering[s], ordering[t]): float(v) for (s, t), v in betweenness.items()}
| [
37811,
11297,
12,
11125,
1022,
1108,
4318,
414,
5260,
526,
15931,
198,
11748,
3127,
87,
355,
299,
87,
198,
6738,
3127,
87,
13,
282,
7727,
907,
13,
31463,
414,
13,
11125,
62,
6759,
8609,
1330,
357,
198,
220,
220,
220,
29925,
818,
439... | 2.581299 | 4,588 |
# from requests_html import HTMLSession
# import re
# from multiprocessing import Pool, Manager, Process
# import pandas as pd
# from functools import partial
import json
import sys
# records = {} # index -> data
# count = 0
if __name__ == '__main__':
if len(sys.argv) != 2:
print("python3 read.py <json_filename>")
file = sys.argv[1]
if not sys.argv[1].endswith('.json'):
file += '.json'
read(file)
| [
2,
422,
7007,
62,
6494,
1330,
11532,
36044,
198,
2,
1330,
302,
198,
2,
422,
18540,
305,
919,
278,
1330,
19850,
11,
9142,
11,
10854,
198,
2,
1330,
19798,
292,
355,
279,
67,
198,
2,
422,
1257,
310,
10141,
1330,
13027,
198,
11748,
33... | 2.554913 | 173 |
#===============================================================================
# This file is part of Jwalk.
#
# Jwalk - A tool to calculate the solvent accessible surface distance (SASD)
# between crosslinked residues.
#
# Copyright 2016 Jwalk Inventor and Birkbeck College University of London.
# The Jwalk Inventor is: Josh Bullock
#
#
# Jwalk is available under Public Licence.
# This software is made available under GPL V3
#
# Please cite your use of Jwalk in published work:
#
# J.Bullock, J. Schwab, K. Thalassinos, M. Topf (2016)
# The importance of non-accessible crosslinks and solvent accessible surface distance
# in modelling proteins with restraints from crosslinking mass spectrometry.
# Molecular and Cellular Proteomics (15) pp.2491-2500
#
#===============================================================================
import sys
from math import cos, sin
import subprocess
def expand_points(atom,sphere):
""" Exapnd the unit sphere around specific x,y,z point and returns surface points"""
points = {}
atom.point_list = []
CH2 = 1.68
radius = {"N":1.43,
"O":1.30,
"C":1.68,
"S":1.67,
}
r = radius[atom.atom_name[0]] + CH2
(x,y,z) = (atom.x, atom.y, atom.z)
for s in sphere:
x1 = x + s[0]*r
y1 = y + s[1]*r
z1 = z + s[2]*r
points[x1,y1,z1] = 0
return points
def create_unit_sphere():
""" Generates a unit sphere with 30 points on the surface """
unit_sphere = []
unit_sphere.append( [0.0, 1.0, 0.0] )
unit_sphere.append( [0.0, -1.0, 0.0] )
nstep = 5
PI = 3.1415926536
theta = PI/nstep
arc = theta
for istep in range(nstep):
istep = istep + 1 # to change range from 0--9 to 1--10
y1 = cos(istep*theta)
r2 = sin(istep*theta)
ndot2= 2*PI*r2/arc # the circumference at that radius / proportion of pi
if ndot2 == 0.0:
continue
theta2 = 2*PI/ndot2
for idot in range(int(ndot2)):
idot = idot + 1 # to change range from 0-- to 1--
x2 = r2*cos(idot*theta2)
z2 = r2*sin(idot*theta2)
unit_sphere.append( [x2, y1, z2] )
return unit_sphere
def check_solvent_accessibility(prot,aa1_CA,xl_list = False):
'''
Checks solvent accessibility of residues in aa1_CA
Returns aa1_CA of solvent accessible residues.
Arguments
*prot*
Tempy structure instance
*aa1_CA*
residues of interest
'''
# dictionary for text output
string_dict = {"LYS":"lysines",
"CYS":"cysteines",
"ASP":"acidic residues",
"GLU":"acidic residues",
"VAL":"valines",
"ILE":"isoleucines",
"LEU":"leucines",
"ARG":"arginines",
"PRO":"prolines",
"GLY":"glycines",
"ALA":"alanines",
"TRP":"tryptophans",
"PHE":"phenylalanines",
"SER":"serines",
"GLN":"glutamines",
"HIS":"histidines",
"MET":"methionines",
"THR":"threonines",
"ASN":"asparagines",
"TYR":"tyrosines"
}
radius = {"N":1.43,
"O":1.30,
"C":1.68,
"S":1.67,
}
sd_res = False
# create sphere of 30 points to expand around atoms
sphere = create_unit_sphere()
SA_res = {}
CH2 = 1.68
# this is not very efficient ... freesasa implementation to come
for atom in prot.atomList:
atom.clash = 1
if (atom.res_no,atom.chain,atom.res) in aa1_CA:
sd_res = atom.res
# generate 30 points in unit spehere around atom
points = expand_points(atom,sphere)
for p in points:
(x,y,z) = (p[0],p[1],p[2])
# for every other atom check if points intersects with it
for atom2 in prot.atomList:
if atom2.res != atom.res or (atom2.res == atom.res and atom2.atom_name != atom.atom_name):
r = radius[atom2.atom_name[0]] + CH2
# need to transpose x,y,z
(tx,ty,tz) = (x-atom2.x,y-atom2.y,z-atom2.z)
# if the point lies within the sphere of that atom then it clashes
if tx**2 + ty**2 + tz**2 <= r**2:
points[p] = 1
break
# if any point on the sphere doesn't intersect with another then the atom is solvent accessible
if points[p] == 0:
atom.clash = 0
break
# if atom doesn't clash then residue information is kept in SA_res
if atom.clash == 0:
SA_res[atom.res_no,atom.chain,atom.res] = aa1_CA[atom.res_no,atom.chain,atom.res]
# inform user on buried resiudes
if xl_list:
pass
elif sd_res == "LYS":
print "%d %s and 1 N-terminus of which %d are on the surface" % (len(aa1_CA)-1,string_dict[sd_res], len(SA_res))
else:
print "%d %s of which %d are on the surface" % (len(aa1_CA),string_dict[sd_res], len(SA_res))
return SA_res
def update_crosslink_pairs(crosslink_pairs, aa1_CA, aa2_CA, remove_aa1, remove_aa2):
'''Removes buried residues from crosslink_pairs'''
buried_residues = []
index_to_delete = []
for i in xrange(len(crosslink_pairs)): # for each residue pair, check both are solvent accessible
x1, x2 = crosslink_pairs[i]
if x1 not in aa1_CA:
index_to_delete.append(i)
if x1 not in buried_residues:
buried_residues.append(x1)
if x2 not in aa2_CA and x2 not in buried_residues:
buried_residues.append(x2)
elif x2 not in aa2_CA:
index_to_delete.append(i)
if x2 not in buried_residues:
buried_residues.append(x2)
if [x1[0],x1[1]] in remove_aa1:
index_to_delete.append(i)
if x1 not in buried_residues:
buried_residues.append(x1)
if x2 in remove_aa2 and not x2 in buried_residues:
buried_residues.append(x2)
elif [x2[0],x2[1]] in remove_aa2:
index_to_delete.append(i)
if x2 not in buried_residues:
buried_residues.append(x2)
no_sasd_possible = []
crosslink_pairs_final = []
for i in xrange(len(crosslink_pairs)):
if i not in index_to_delete:
crosslink_pairs_final.append(crosslink_pairs[i])
else:
no_sasd_possible.append(crosslink_pairs[i])
if len(no_sasd_possible) > 0:
print "the following crosslinks cannot be calculated:"
for s in no_sasd_possible:
print "%s-%s-%s - %s-%s-%s" % (s[0][2],s[0][0],s[0][1],s[1][2],s[1][0],s[1][1])
return crosslink_pairs_final
| [
2,
23926,
25609,
18604,
198,
2,
220,
220,
220,
220,
770,
2393,
318,
636,
286,
449,
11152,
13,
198,
2,
220,
220,
220,
220,
220,
198,
2,
220,
220,
220,
220,
449,
11152,
532,
317,
2891,
284,
15284,
262,
43209,
9857,
4417,
5253,
357,
... | 1.897002 | 3,903 |
import riemann
from riemann import utils
from riemann.tx import shared
from riemann.script import serialization
from riemann.tx.shared import ByteData, VarInt
from typing import List, Optional, overload, Sequence, Tuple
class Outpoint(ByteData):
    '''
    A pointer to the UTXO consumed by an input. The prevout is identified
    by the id of the transaction that created it, plus the position of the
    output within that transaction's output vector.

    NB: Args must be little-endian

    Args:
        tx_id: the 32-byte LE hash of the previous transaction
        index: the 4-byte LE encoded index of the prevout in its transaction

    Attributes:
        tx_id: the 32-byte LE hash of the previous transaction
        index: the 4-byte LE encoded index of the prevout in its transaction
    '''
    tx_id: bytes
    index: bytes

    def copy(self,
             tx_id: Optional[bytes] = None,
             index: Optional[bytes] = None) -> 'Outpoint':
        '''
        Return a duplicate of this Outpoint, substituting any field for
        which a replacement value was supplied.
        '''
        new_tx_id = self.tx_id if tx_id is None else tx_id
        new_index = self.index if index is None else index
        return Outpoint(tx_id=new_tx_id, index=new_index)

    @classmethod
    def null(Outpoint) -> 'Outpoint':
        '''Build the null outpoint, as found in coinbase transactions'''
        return Outpoint(bytes(32), b'\xff\xff\xff\xff')

    @classmethod
    def from_bytes(Outpoint, byte_string: bytes) -> 'Outpoint':
        '''
        Deserialize an Outpoint from a bytestring. Also available as
        from_hex
        '''
        # fixed layout: 32-byte txid followed by a 4-byte index
        tx_id, index = byte_string[:32], byte_string[32:36]
        return Outpoint(tx_id=tx_id, index=index)
class TxIn(ByteData):
    '''
    A transaction input: an outpoint, a script_sig, and a sequence number.

    Legacy TxIn script sigs carry the spend authorization for the
    referenced UTXO. Compatibility TxIn script sigs carry only the witness
    program. Segwit TxIn script sigs are empty.

    The sequence number encodes relative timelocks. See
    `this blog post <https://prestwi.ch/bitcoin-time-locks/>`_ for details.

    Args:
        outpoint: The `Outpoint` object pointing to the prevout being consumed
                  by this input
        stack_script: The Script program that sets the initial stack, if any.
                      Legacy inputs are unsigned without this. Segwit inputs
                      never have this.
        redeem_script: The Script program that controls spending, if any. Only
                       present when spending a Legacy or Compatibility SH UTXO.
                       Segwit inputs never have this.
        sequence: The 4-byte LE encoded sequence number of the input. Can be
                  used to set relative timelocks.

    Attributes:
        outpoint: The `Outpoint` object pointing to the prevout being consumed
                  by this input
        stack_script: The Script program that sets the initial stack, if any.
        redeem_script: The Script program that controls spending, if any.
        script_sig: The concatenated stack_script and redeem_script. Equal to
                    the stack script alone for Legacy and Compatibility PKH
                    inputs.
        sequence: The 4-byte LE encoded sequence number of the input.
    '''
    outpoint: Outpoint
    stack_script: bytes
    redeem_script: bytes
    sequence: bytes

    def copy(self,
             outpoint: Optional[Outpoint] = None,
             stack_script: Optional[bytes] = None,
             redeem_script: Optional[bytes] = None,
             sequence: Optional[bytes] = None) -> 'TxIn':
        '''
        Return a duplicate of this TxIn, substituting any field for which
        a replacement value was supplied.
        '''
        return TxIn(
            outpoint=self.outpoint if outpoint is None else outpoint,
            stack_script=(self.stack_script if stack_script is None
                          else stack_script),
            redeem_script=(self.redeem_script if redeem_script is None
                           else redeem_script),
            sequence=self.sequence if sequence is None else sequence)

    def is_p2sh(self) -> bool:
        '''
        True if the TxIn carries a non-empty `redeem_script`
        '''
        return len(self.redeem_script) != 0

    @staticmethod
    def _parse_script_sig(script_sig: bytes) -> Tuple[bytes, bytes]:
        '''
        Split a raw script sig into (stack_script, redeem_script).

        If the final pushed item itself deserializes as a script, the input
        is treated as p2sh and that item becomes the redeem script.
        Otherwise the whole script sig is the stack script.
        '''
        # Is there a better way to do this?
        stack = script_sig
        redeem = b''
        try:
            # If the last entry deserializes, treat it as a p2sh input.
            # There is a vanishingly small edge case where a pubkey forms
            # a deserializable script.
            # Edge case: serialization errors on CODESEPARATOR
            tokens = serialization.deserialize(script_sig).split()
            serialization.hex_deserialize(tokens[-1])
            stack = serialization.serialize(' '.join(tokens[:-1]))
            redeem = serialization.serialize(tokens[-1])
        except (IndexError, ValueError, NotImplementedError):
            pass
        return stack, redeem

    @classmethod
    def from_bytes(TxIn, byte_string: bytes) -> 'TxIn':
        '''
        Deserialize a TxIn from a bytestring. Also available as from_hex
        '''
        outpoint = Outpoint.from_bytes(byte_string[:36])

        # the script sig is VarInt length-prepended
        sig_len = VarInt.from_bytes(byte_string[36:45])
        sig_start = 36 + len(sig_len)
        sig_end = sig_start + sig_len.number
        script_sig = byte_string[sig_start:sig_end]

        sequence = byte_string[sig_end:sig_end + 4]

        if not script_sig:
            # blank script sig: both stack and redeem are blank
            stack_script = b''
            redeem_script = b''
        elif outpoint == Outpoint.null():
            # coinbase inputs hold arbitrary data; never parse them
            stack_script = script_sig
            redeem_script = b''
        else:
            stack_script, redeem_script = TxIn._parse_script_sig(script_sig)

        return TxIn(
            outpoint=outpoint,
            stack_script=stack_script,
            redeem_script=redeem_script,
            sequence=sequence)
class TxOut(ByteData):
    '''
    A transaction output, composed of a value (in satoshi) and an output script
    describing the spend conditions on the new UTXO.

    Value is serialized as an 8-byte LE integer, measured in satoshi. Use the
    `i2le_padded` function in :ref:`utils` to serialize integers.

    TxOut accepts un-prepended output scripts, and adds their length for you.

    Args:
        value: the 8-byte LE encoded value of the output (in satoshi)
        output_script: the non-length-prepended output script as a bytestring

    Attributes:
        value: the 8-byte LE encoded value of the output (in satoshi)
        output_script: the non-length-prepended output script as a bytestring
    '''
    value: bytes
    output_script: bytes

    def copy(self,
             value: Optional[bytes] = None,
             output_script: Optional[bytes] = None) -> 'TxOut':
        '''
        Make a new copy of the object with optional modifications.
        '''
        return TxOut(
            value=value if value is not None else self.value,
            output_script=(output_script if output_script is not None
                           else self.output_script))

    @classmethod
    def from_bytes(TxOut, byte_string: bytes) -> 'TxOut':
        '''
        Parse a TxOut from a bytestring. Also available as from_hex

        Raises:
            NotImplementedError: if the output script is long enough to
                need a multi-byte length VarInt (253 bytes or more)
        '''
        n = VarInt.from_bytes(byte_string[8:])
        script_start = 8 + len(n)
        script_end = script_start + n.number
        # A single-byte CompactSize VarInt encodes lengths 0x00 through
        # 0xfc INCLUSIVE. The previous strict `< 0xfc` check wrongly
        # rejected valid 252-byte scripts.
        if n.number <= 0xfc:
            return TxOut(
                value=byte_string[:8],
                output_script=byte_string[script_start:script_end])
        else:
            raise NotImplementedError(
                'No support for abnormally long pk_scripts.')
class WitnessStackItem(ByteData):
    '''
    One element of an input witness stack. Each input witness consists of
    an initial stack evaluated by the witness program; for P2WSH inputs
    the last stack element is a serialized script.

    Args:
        item: the raw data to be placed on the stack

    Attributes:
        item: the raw data to be placed on the stack
    '''

    @classmethod
    def from_bytes(WitnessStackItem, byte_string: bytes) -> 'WitnessStackItem':
        '''
        Deserialize a WitnessStackItem from a bytestring. Also available
        as from_hex
        '''
        # the item is VarInt length-prepended
        length = VarInt.from_bytes(byte_string)
        start = len(length)
        return WitnessStackItem(byte_string[start:start + length.number])
class InputWitness(ByteData):
    '''
    The witness for a Compatibility or Segwit TxIn: an ordered stack of
    `WitnessStackItem` objects to be evaluated by the witness program.

    Args:
        stack: the ordered sequence of WitnessStackItems

    Attributes:
        stack: the ordered sequence of WitnessStackItems
    '''
    stack: Tuple[WitnessStackItem, ...]

    def __init__(self, stack: Sequence[WitnessStackItem]):
        '''
        list(WitnessStackItem) -> InputWitness
        '''
        super().__init__()
        # Validate every entry before serializing anything.
        for entry in stack:
            if not isinstance(entry, WitnessStackItem):
                raise ValueError(
                    'Invalid witness stack item. '
                    'Expected WitnessStackItem. Got {}'
                    .format(entry))
        # Serialization is a VarInt count followed by each item.
        self += VarInt(len(stack))
        for entry in stack:
            self += entry
        self.stack = tuple(stack)
        self._make_immutable()

    @classmethod
    def from_bytes(InputWitness, byte_string: bytes) -> 'InputWitness':
        '''
        Parse an InputWitness from a bytestring. Also available as from_hex.
        '''
        count = VarInt.from_bytes(byte_string)
        cursor = len(count)
        parsed: List[WitnessStackItem] = []
        while len(parsed) < count.number:
            entry = WitnessStackItem.from_bytes(byte_string[cursor:])
            cursor += len(entry)
            parsed.append(entry)
        return InputWitness(parsed)

    def copy(self,
             stack: Optional[List[WitnessStackItem]] = None) -> 'InputWitness':
        '''
        Return a duplicate InputWitness, substituting the stack if passed.
        '''
        return InputWitness(stack=self.stack if stack is None else stack)
class Tx(ByteData):
    '''
    A complete transaction.

    It consists of a version, a flag that indicates the presence of
    witnesses (and breaks legacy parsers), a length-prepended vector of
    `TxIn` objects, a length-prepended vector of `TxOut` objects, and a
    locktime number. Compatibility and Segwit transactions MUST contain the
    witness flag. Signed Compatibility and Segwit transactions additionally
    contain a vector of `InputWitness` objects, one witness per input.

    This object provides a number of conveniences for interacting with
    transactions, including `tx_id` calculation and sighash calculation.
    To serialize the transaction, call `to_bytes()` or `hex()`.

    Note:
        The `lock_time` field is used to set absolute timelocks. These are
        complex and confusing. See
        `this blog post <https://prestwi.ch/bitcoin-time-locks/>`_ for
        details.

    Args:
        version: the 4-byte LE version number. Must be 1 or 2. Setting to 1
            deactivates relative lock times.
        tx_ins: the ordered sequence of TxIn objects representing TXOs
            consumed by this transaction. Signed Legacy transactions
            include spend authorization here.
        tx_outs: the ordered sequence of TxOut objects representing TXOs
            created by this transaction.
        tx_witnesses: the ordered sequence of InputWitness objects
            associated with this transaction. Always empty in Legacy
            transactions; one witness per input in Compatibility and
            Segwit transactions.
        lock_time: the 4-byte LE locktime number. If below 500,000,000 it
            is interpreted as a block height, otherwise as a Unix
            timestamp, before which the transaction is invalid.

    Attributes:
        version: the 4-byte LE version number (see Args).
        flag: the 2-byte witness transaction flag. None for Legacy
            transactions, '0001' for Compatibility and Witness ones.
        tx_ins: the ordered sequence of TxIn objects (see Args).
        tx_outs: the ordered sequence of TxOut objects (see Args).
        tx_witnesses: the ordered sequence of InputWitness objects
            (see Args).
        lock_time: the 4-byte LE locktime number (see Args).
        tx_id_le: the LE (in-protocol) hash committed to by the block
            header transaction merkle tree.
        wtx_id_le: the LE (in-protocol) hash committed to by the coinbase
            transaction witness merkle tree. None in Legacy transactions.
        tx_id: the BE (block explorer or human-facing) tx_id.
        wtx_id: the BE (block explorer or human-facing) wtx_id. None in
            Legacy transactions.
    '''
    version: bytes
    flag: Optional[bytes]
    tx_ins: Tuple[TxIn, ...]
    tx_outs: Tuple[TxOut, ...]
    tx_witnesses: Optional[Tuple[InputWitness, ...]]
    lock_time: bytes
    tx_id_le: bytes
    wtx_id_le: Optional[bytes]
    tx_id: bytes
    wtx_id: Optional[bytes]
@classmethod
def from_hex(Tx, hex_string: str) -> 'Tx':
'''Instantiate a Tx object from a hex string'''
return Tx.from_bytes(bytes.fromhex(hex_string))
    @classmethod
    def from_bytes(Tx, byte_string: bytes) -> 'Tx':
        '''Instantiate a Tx object from a bytestring'''
        # Get the 4-byte LE version number
        version = byte_string[0:4]
        # Check if this is a witness tx: the segwit marker+flag immediately
        # follows the version when present
        if byte_string[4:6] == riemann.network.SEGWIT_TX_FLAG:
            tx_ins_num_loc = 6
            flag = riemann.network.SEGWIT_TX_FLAG
        else:
            tx_ins_num_loc = 4
            flag = None
        # Get the length of the tx_in vector (VarInt-prefixed)
        tx_ins = []
        tx_ins_num = VarInt.from_bytes(byte_string[tx_ins_num_loc:])
        # `current` is the index of next read
        current = tx_ins_num_loc + len(tx_ins_num)
        # Deserialize all tx_ins, advancing the cursor by each one's length
        for _ in range(tx_ins_num.number):
            tx_in = TxIn.from_bytes(byte_string[current:])
            current += len(tx_in)
            tx_ins.append(tx_in)
        # Get the length of the tx_out vector (VarInt-prefixed)
        tx_outs = []
        tx_outs_num = VarInt.from_bytes(byte_string[current:])
        # Deserialize all outputs
        current += len(tx_outs_num)
        for _ in range(tx_outs_num.number):
            tx_out = TxOut.from_bytes(byte_string[current:])
            current += len(tx_out)
            tx_outs.append(tx_out)
        # Deserialize all witnesses if necessary. More than 4 remaining
        # bytes means there is data beyond the trailing lock_time, which is
        # taken to be the witness vector (one witness per input).
        # NOTE(review): assumes no trailing garbage after lock_time; a
        # witness tx is expected to carry exactly tx_ins_num witnesses.
        tx_witnesses: List[InputWitness] = []
        if flag and len(byte_string[current:]) > 4:
            tx_witnesses_num = tx_ins_num
            for _ in range(tx_witnesses_num.number):
                tx_witness = InputWitness.from_bytes(byte_string[current:])
                current += len(tx_witness)
                tx_witnesses.append(tx_witness)
        # Get the 4-byte LE lock time and return a complete tx
        lock_time = byte_string[current:current + 4]
        return Tx(
            version=version,
            flag=flag,
            tx_ins=tx_ins,
            tx_outs=tx_outs,
            tx_witnesses=tx_witnesses,
            lock_time=lock_time)
def no_witness(self) -> bytes:
'''
Return the Tx as a bytestring stripped of witnesses. This is the
preimage of `tx_id` and `tx_id_le`.
'''
tx = bytes()
tx += self.version
tx += VarInt(len(self.tx_ins)).to_bytes()
for tx_in in self.tx_ins:
tx += tx_in.to_bytes()
tx += VarInt(len(self.tx_outs)).to_bytes()
for tx_out in self.tx_outs:
tx += tx_out.to_bytes()
tx += self.lock_time
return tx
def is_witness(self) -> bool:
'''Return True if the transaction witness flag is set'''
return self.flag is not None or self.tx_witnesses is not None
def calculate_fee(self, input_values: Sequence[int]) -> int:
'''
Calculate the fee associated with a transaction. Caller must provide a
sequence representing the value (in satoshi) of each input.
Args:
input_values: The value of each input in order.
Returns:
The total fee paid to miners by this transaction.
'''
return \
sum(input_values) \
- sum([utils.le2i(o.value) for o in self.tx_outs])
    def sighash_none(self) -> bytes:
        '''SIGHASH_NONE is a bad idea.'''
        # Deliberately unsupported: SIGHASH_NONE commits to no outputs at
        # all, allowing anyone to redirect the funds.
        raise NotImplementedError('SIGHASH_NONE is a bad idea.')
def copy(self,
version: Optional[bytes] = None,
flag: Optional[bytes] = None,
tx_ins: Optional[Sequence[TxIn]] = None,
tx_outs: Optional[Sequence[TxOut]] = None,
tx_witnesses: Optional[Sequence[InputWitness]] = None,
lock_time: Optional[bytes] = None) -> 'Tx':
'''
Make a new copy of the object with optional modifications.
'''
return Tx(version=version if version is not None else self.version,
flag=flag if flag is not None else self.flag,
tx_ins=tx_ins if tx_ins is not None else self.tx_ins,
tx_outs=tx_outs if tx_outs is not None else self.tx_outs,
tx_witnesses=(tx_witnesses if tx_witnesses is not None
else self.tx_witnesses),
lock_time=(lock_time if lock_time is not None
else self.lock_time))
    def _sighash_prep(self, index: int, script: bytes) -> 'Tx':
        '''
        Build the transaction copy used for legacy sighash calculation.

        Performs the sighash setup described here:
        https://en.bitcoin.it/wiki/OP_CHECKSIG#How_it_works
        https://bitcoin.stackexchange.com/questions/3374/how-to-redeem-a-basic-tx

        We save on complexity by refusing to support OP_CODESEPARATOR.
        '''
        # 0 out the scripts in every tx_in; only the signed input carries
        # a script in the digest preimage.
        copy_tx_ins = [tx_in.copy(stack_script=b'', redeem_script=b'')
                       for tx_in in self.tx_ins]
        # NB: The script for the current transaction input in txCopy is set to
        #     subScript (lead in by its length as a var-integer encoded!).
        #     `script` arrives length-prepended, so its leading VarInt is
        #     stripped before it is embedded as the redeem_script.
        to_strip = VarInt.from_bytes(script)
        copy_tx_ins[index] = \
            copy_tx_ins[index].copy(redeem_script=script[len(to_strip):])
        return self.copy(tx_ins=copy_tx_ins)
@overload
@overload # noqa: F811
@overload # noqa: F811
@overload # noqa: F811
def sighash_all(self, # noqa: F811
index,
script,
prevout_value=None,
anyone_can_pay=False) -> bytes:
'''
Calculate the hash to be signed when adding authorization information
(a script sig or a witness) to an input using SIGHASH_ALL.
SIGHASH_ALL commits to ALL inputs, and ALL outputs. It indicates that
no further modification of the transaction is allowed without
invalidating the signature.
SIGHASH_ALL + ANYONECANPAY commits to ONE input and ALL outputs. It
indicates that anyone may add additional value to the transaction, but
that no one may modify the payments made. Any extra value added above
the sum of output values will be given to miners as part of the tx fee.
We must specify the index of the input in the `tx_ins` sequence, the
script controlling the TXO being spent by the input, and whether to use
the ANYONECANPAY sighash modifier. Compatibility and Witness inputs
must additionally supply the value of the TXO being consumed.
This function automatically selects between Legacy, Witness, and Bcash
SIGHASH_FORKID based on the network selected, and whether the witness
flag is present in the transaction.
For Legacy sighash documentation, see here:
- https://en.bitcoin.it/wiki/OP_CHECKSIG#Hashtype_SIGHASH_ALL_.28default.29
For BIP143 (Witness and Compatibility) documentation, see here:
- https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
For the BitcoinCash specific rip-off of BIP143 documentation, see here:
- https://github.com/bitcoincashorg/spec/blob/master/replay-protected-sighash.md
Note:
After signing the digest, you MUST append the sighash indicator
byte to the resulting signature. This will be 0x01 (SIGHASH_ALL) or
0x81 (SIGHASH_ALL + SIGHASH_ANYONECANPAY).
Args:
index: The index of the input being authorized
script: The length-prepended script associated with the TXO being
spent. For PKH outputs this will be a pkh spend script (
i.e. '1976a914....88ac'). For SH outputs this will be the
redeem_script (Legacy) or Witness Script (Compatibility and
Segwit). If the TXO being spent has a non-standard output
script, use that here.
prevout_value: The 8-byte LE integer-encoded value of the prevout
anyone_can_pay: True if using the ANYONECANPAY sighash modifier
Returns:
The 32-byte digest to be signed.
''' # noqa: E501
if riemann.network.FORKID is not None:
return self._sighash_forkid(index=index,
script=script,
prevout_value=prevout_value,
sighash_type=shared.SIGHASH_ALL,
anyone_can_pay=anyone_can_pay)
if self.is_witness():
return self.segwit_sighash(
index=index,
script=script,
prevout_value=prevout_value,
sighash_type=shared.SIGHASH_ALL,
anyone_can_pay=anyone_can_pay)
copy_tx = self._sighash_prep(index=index, script=script)
if anyone_can_pay:
return self._sighash_anyone_can_pay(
index=index, copy_tx=copy_tx, sighash_type=shared.SIGHASH_ALL)
return self._sighash_final_hashing(copy_tx, shared.SIGHASH_ALL)
@overload
@overload # noqa: F811
@overload # noqa: F811
@overload # noqa: F811
def sighash_single(self, # noqa: F811
index,
script,
prevout_value=None,
anyone_can_pay=False):
'''
Calculate the hash to be signed when adding authorization information
(a script sig or a witness) to an input using SIGHASH_SINGLE.
SIGHASH_SINGLE commits to ALL inputs, and ONE output. It indicates that/
anyone may append additional outputs to the transaction to reroute
funds from the inputs. Additional inputs cannot be added without
invalidating the signature. It is logically difficult to use securely,
as it consents to funds being moved, without specifying their
destination.
SIGHASH_SINGLE commits specifically the the output at the same index as
the input being signed. If there is no output at that index, (because,
e.g. the input vector is longer than the output vector) it behaves
insecurely, and we do not implement that protocol bug.
SIGHASH_SINGLE + ANYONECANPAY commits to ONE input and ONE output. It
indicates that anyone may add additional value to the transaction, and
route value to any other location. The signed input and output must be
included in the fully-formed transaction at the same index in their
respective vectors.
When the input is larger than the output, a partial transaction signed
this way cedes the difference to whoever cares to construct a complete
transaction. However, when the output is larger than the input, it
functions as a one-time-use payment invoice. Anyone may consume the
input by adding value. This is useful for addressing race conditions in
certain cross-chain protocols that the author of this documentation
invented. :)
We must specify the index of the input in the `tx_ins` sequence, the
script controlling the TXO being spent by the input, and whether to use
the ANYONECANPAY sighash modifier. Compatibility and Witness inputs
must additionally supply the value of the TXO being consumed.
This function automatically selects between Legacy, Witness, and Bcash
SIGHASH_FORKID based on the network selected, and whether the witness
flag is present in the transaction.
For Legacy sighash documentation, see here:
- https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_SINGLE
- https://bitcoin.stackexchange.com/questions/3890/for-sighash-single-do-the-outputs-other-than-at-the-input-index-have-8-bytes-or
- https://github.com/petertodd/python-bitcoinlib/blob/051ec4e28c1f6404fd46713c2810d4ebbed38de4/bitcoin/core/script.py#L913-L965
For BIP143 (Witness and Compatibility) documentation, see here:
- https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
For the BitcoinCash specific rip-off of BIP143 documentation, see here:
- https://github.com/bitcoincashorg/spec/blob/master/replay-protected-sighash.md
Note:
After signing the digest, you MUST append the sighash indicator
byte to the resulting signature. This will be 0x03 (SIGHASH_SINGLE)
or 0x83 (SIGHASH_SINGLE + SIGHASH_ANYONECANPAY).
Args:
index: The index of the input being authorized
script: The length-prepended script associated with the TXO being
spent. For PKH outputs this will be a pkh spend script (
i.e. '1976a914....88ac'). For SH outputs this will be the
redeem_script (Legacy) or Witness Script (Compatibility and
Segwit). If the TXO being spent has a non-standard output
script, use that here.
prevout_value: The 8-byte LE integer-encoded value of the prevout
anyone_can_pay: True if using the ANYONECANPAY sighash modifier
Returns:
The 32-byte digest to be signed.
''' # noqa: E501
if index >= len(self.tx_outs):
raise NotImplementedError(
'I refuse to implement the SIGHASH_SINGLE bug.')
if riemann.network.FORKID is not None:
return self._sighash_forkid(index=index,
script=script,
prevout_value=prevout_value,
sighash_type=shared.SIGHASH_SINGLE,
anyone_can_pay=anyone_can_pay)
if self.is_witness():
return self.segwit_sighash(
index=index,
script=script,
prevout_value=prevout_value,
sighash_type=shared.SIGHASH_SINGLE,
anyone_can_pay=anyone_can_pay)
copy_tx = self._sighash_prep(index=index, script=script)
# Remove outputs after the one we're signing
# Other tx_outs are set to -1 value and null scripts
copy_tx_outs = list(copy_tx.tx_outs[:index + 1])
copy_tx_outs = [TxOut(value=b'\xff' * 8, output_script=b'')
for _ in copy_tx.tx_ins] # Null them all
copy_tx_outs[index] = copy_tx.tx_outs[index] # Fix the current one
# Other tx_ins sequence numbers are set to 0
copy_tx_ins = [tx_in.copy(sequence=b'\x00\x00\x00\x00')
for tx_in in copy_tx.tx_ins] # Set all to 0
copy_tx_ins[index] = copy_tx.tx_ins[index] # Fix the current one
copy_tx = copy_tx.copy(
tx_ins=copy_tx_ins,
tx_outs=copy_tx_outs)
if anyone_can_pay: # Forward onwards
return self._sighash_anyone_can_pay(
index, copy_tx, shared.SIGHASH_SINGLE)
return self._sighash_final_hashing(copy_tx, shared.SIGHASH_SINGLE)
    def segwit_sighash(self,
                       index: int,
                       sighash_type: int,
                       prevout_value: bytes,
                       script: bytes,
                       anyone_can_pay: bool = False) -> bytes:
        '''
        Implements BIP143 (witness) sighash. Prefer calling `sighash_all`
        or `sighash_single`, which dispatch here when appropriate.

        Note that the positional parameter order differs from
        `sighash_all`/`sighash_single`; call with keyword arguments.

        For documentation see:
        https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki

        NOTE(review): `_hash_prevouts` and `_segwit_sighash_adjustment` are
        defined elsewhere in this module (not shown in this chunk).
        '''
        data = ByteData()
        # 1. nVersion of the transaction (4-byte little endian)
        data += self.version
        # 2. hashPrevouts (32-byte hash)
        data += self._hash_prevouts(anyone_can_pay=anyone_can_pay)
        # 3. hashSequence (32-byte hash)
        data += self._hash_sequence(sighash_type=sighash_type,
                                    anyone_can_pay=anyone_can_pay)
        # 4. outpoint (32-byte hash + 4-byte little endian)
        data += self.tx_ins[index].outpoint
        # 5. scriptCode of the input (serialized as scripts inside CTxOuts)
        data += script
        # 6. value of the output spent by this input (8-byte little endian)
        data += prevout_value
        # 7. nSequence of the input (4-byte little endian)
        data += self.tx_ins[index].sequence
        # 8. hashOutputs (32-byte hash)
        data += self._hash_outputs(index=index, sighash_type=sighash_type)
        # 9. nLocktime of the transaction (4-byte little endian)
        data += self.lock_time
        # 10. sighash type of the signature (4-byte little endian)
        data += self._segwit_sighash_adjustment(sighash_type=sighash_type,
                                                anyone_can_pay=anyone_can_pay)
        return utils.hash256(data.to_bytes())
def _sighash_anyone_can_pay(
self,
index: int,
copy_tx: 'Tx',
sighash_type: int) -> bytes:
'''
int, byte-like, Tx, int -> bytes
Applies SIGHASH_ANYONECANPAY procedure.
Should be called by another SIGHASH procedure.
Not on its own.
https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY
'''
# The txCopy input vector is resized to a length of one.
copy_tx_ins = [copy_tx.tx_ins[index]]
copy_tx = copy_tx.copy(tx_ins=copy_tx_ins)
return self._sighash_final_hashing(
copy_tx, sighash_type | shared.SIGHASH_ANYONECANPAY)
def _sighash_final_hashing(
self,
copy_tx: 'Tx',
sighash_type: int) -> bytes:
'''
Tx, int -> bytes
Returns the hash that should be signed
https://en.bitcoin.it/wiki/OP_CHECKSIG#Procedure_for_Hashtype_SIGHASH_ANYONECANPAY
'''
sighash = ByteData()
sighash += copy_tx.to_bytes()
sighash += utils.i2le_padded(sighash_type, 4)
return utils.hash256(sighash.to_bytes())
def _hash_sequence(self, sighash_type: int, anyone_can_pay: bool) -> bytes:
'''BIP143 hashSequence implementation
Args:
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
anyone_can_pay (bool): true if ANYONECANPAY should be set
Returns:
(bytes): the hashSequence, a 32 byte hash
'''
if anyone_can_pay or sighash_type == shared.SIGHASH_SINGLE:
# If any of ANYONECANPAY, SINGLE sighash type is set,
# hashSequence is a uint256 of 0x0000......0000.
return b'\x00' * 32
else:
# hashSequence is the double SHA256 of nSequence of all inputs;
sequences = ByteData()
for tx_in in self.tx_ins:
sequences += tx_in.sequence
return utils.hash256(sequences.to_bytes())
def _hash_outputs(self, index: int, sighash_type: int) -> bytes:
'''BIP143 hashOutputs implementation
Args:
index (int): index of input being signed
sighash_type (int): SIGHASH_SINGLE or SIGHASH_ALL
Returns:
(bytes): the hashOutputs, a 32 byte hash
'''
if sighash_type == shared.SIGHASH_ALL:
# If the sighash type is ALL,
# hashOutputs is the double SHA256 of all output amounts
# paired up with their scriptPubKey;
outputs = ByteData()
for tx_out in self.tx_outs:
outputs += tx_out.to_bytes()
return utils.hash256(outputs.to_bytes())
elif (sighash_type == shared.SIGHASH_SINGLE
and index < len(self.tx_outs)):
# if sighash type is SINGLE
# and the input index is smaller than the number of outputs,
# hashOutputs is the double SHA256 of the output at the same index
return utils.hash256(self.tx_outs[index].to_bytes())
else:
# Otherwise, hashOutputs is a uint256 of 0x0000......0000
raise NotImplementedError(
'I refuse to implement the SIGHASH_SINGLE bug.')
    def _sighash_forkid(
            self,
            index: int,
            script: bytes,
            prevout_value: bytes,
            sighash_type: int,
            anyone_can_pay: bool = False) -> bytes:
        '''
        BitcoinCash-style SIGHASH_FORKID digest (a BIP143 derivative).
        https://github.com/bitcoincashorg/spec/blob/master/replay-protected-sighash.md

        NOTE(review): `_hash_prevouts` and `_forkid_sighash_adjustment` are
        defined elsewhere in this module (not shown in this chunk).
        '''
        # prevout_value must be exactly 8 bytes (LE satoshi amount).
        self.validate_bytes(prevout_value, 8)
        data = ByteData()
        # 1. nVersion of the transaction (4-byte little endian)
        data += self.version
        # 2. hashPrevouts (32-byte hash)
        data += self._hash_prevouts(anyone_can_pay=anyone_can_pay)
        # 3. hashSequence (32-byte hash)
        data += self._hash_sequence(sighash_type=sighash_type,
                                    anyone_can_pay=anyone_can_pay)
        # 4. outpoint (32-byte hash + 4-byte little endian)
        data += self.tx_ins[index].outpoint
        # 5. scriptCode of the input (serialized as scripts inside CTxOuts)
        data += script
        # 6. value of the output spent by this input (8-byte little endian)
        data += prevout_value
        # 7. nSequence of the input (4-byte little endian)
        data += self.tx_ins[index].sequence
        # 8. hashOutputs (32-byte hash)
        data += self._hash_outputs(index=index, sighash_type=sighash_type)
        # 9. nLocktime of the transaction (4-byte little endian)
        data += self.lock_time
        # 10. sighash type of the signature (4-byte little endian)
        data += self._forkid_sighash_adjustment(sighash_type=sighash_type,
                                                anyone_can_pay=anyone_can_pay)
        return utils.hash256(data.to_bytes())
| [
11748,
374,
26597,
1236,
198,
6738,
374,
26597,
1236,
1330,
3384,
4487,
198,
6738,
374,
26597,
1236,
13,
17602,
1330,
4888,
198,
6738,
374,
26597,
1236,
13,
12048,
1330,
11389,
1634,
198,
6738,
374,
26597,
1236,
13,
17602,
13,
28710,
13... | 2.285172 | 16,320 |
#!/usr/bin/python3
import argparse
import paho.mqtt.client as mqtt
import datetime
from time import sleep
from binascii import unhexlify
from random import randint
from pycomfoconnect import *
import getopt
## Configuration ######################################################################################################
# Service name this gateway advertises to the ComfoConnect unit.
local_name = 'OpenHAB2 ComfoConnect Gateway'
# Any UUID works; it differentiates devices (only 1 simultaneously
# connected device is allowed by the unit).
local_uuid = bytes.fromhex('00000000000000000000000000000005')
device_ip = "192.168.1.7"  # IP of the ComfoConnect device (give it a static lease in your router)
# Obtain via discovery first (run the script with: -d <ip-address>), then configure here.
device_uuid = bytes.fromhex('00000000001710138001144fd71e1a11')
pin = 1234  # PIN of the ventilation unit
mqtt_broker = "192.168.1.50"  # MQTT broker host
mqtt_user = "my_user"  # MQTT user login
mqtt_passw = "my_pw"  # MQTT user password
mqtt_topic = "Zehnder/ComfoAirQ350/"  # MQTT root topic
## Start logger ########################################################################################################
## Connect to Comfocontrol device #####################################################################################
bridge = Bridge(device_ip, device_uuid)
#bridge.debug = True
comfoconnect = ComfoConnect(bridge, local_uuid, local_name, pin)
# Last-seen state, used so publishes happen only on change.
previousreply = b'\x01'
prevspeed = 0
prevmode = 0
prevalt = 0
# Sensor id -> last published value. Assumes 81/213/122/121 are unit sensor
# ids -- TODO confirm against pycomfoconnect documentation.
prevvalue = {81 : 0, 213 : 0, 122 : 0, 121 : 0 }
# The MQTT subscriber/publisher wiring below is disabled; presumably the
# equivalent setup happens inside main() -- confirm before deleting.
#client = mqtt.Client(client_id="S1_%s" % randint(1, 10000), clean_session=False)
#client.on_connect = sub_on_connect
#client.on_disconnect = sub_on_disconnect
#client.username_pw_set(mqtt_user,mqtt_passw)
#client.connect(mqtt_broker)
#client.subscribe("Zehnder/ComfoAirQ450/ExecuteFunction",qos=1)
#client.on_message=on_message
#client.loop_start()
#clientpub = mqtt.Client(client_id="P1_%s" % randint(1, 10000), clean_session=False)
#clientpub.on_connect = pub_on_connect
#clientpub.on_disconnect = pub_on_disconnect
#clientpub.username_pw_set(mqtt_user,mqtt_passw)
#clientpub.connect(mqtt_broker)
#clientpub.loop_start()
if __name__ == "__main__":
    # main() is defined elsewhere in this file (not shown in this chunk).
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
11748,
1822,
29572,
198,
11748,
279,
17108,
13,
76,
80,
926,
13,
16366,
355,
285,
80,
926,
198,
11748,
4818,
8079,
198,
6738,
640,
1330,
3993,
198,
6738,
9874,
292,
979,
72,
1330,
555,
... | 3.040208 | 771 |
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from typing import Union
def now() -> datetime:
    """Return the current moment as a timezone-aware UTC datetime."""
    return datetime.now(tz=timezone.utc)
class TimeStamp:
"""Representation of a timestamp with an integer number of seconds"""
# Access the underlying unix timestamp
ts = property(lambda self: self._ts)
_ts: int
def __init__(self, ts: [int, float]):
"""Create from unix timestamp"""
self._ts = round(ts)
@classmethod
@property
def dt(self) -> datetime:
"""Timestamp as a datetime object"""
return datetime.fromtimestamp(self._ts)
@property
| [
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
6738,
4818,
8079,
1330,
640,
11340,
198,
6738,
19720,
1330,
4479,
628,
198,
4299,
783,
3419,
4613,
4818,
8079,
25,
198,
220,
220,
220,
37227,
13615,
783... | 2.879668 | 241 |
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.opsworks
from troposphere.opsworks import (
AutoScalingThresholds as _AutoScalingThresholds,
BlockDeviceMapping as _BlockDeviceMapping,
ChefConfiguration as _ChefConfiguration,
DataSource as _DataSource,
EbsBlockDevice as _EbsBlockDevice,
ElasticIp as _ElasticIp,
EngineAttribute as _EngineAttribute,
Environment as _Environment,
LifeCycleConfiguration as _LifeCycleConfiguration,
LoadBasedAutoScaling as _LoadBasedAutoScaling,
RdsDbInstance as _RdsDbInstance,
Recipes as _Recipes,
ShutdownEventConfiguration as _ShutdownEventConfiguration,
Source as _Source,
SslConfiguration as _SslConfiguration,
StackConfigurationManager as _StackConfigurationManager,
Tags as _Tags,
TimeBasedAutoScaling as _TimeBasedAutoScaling,
VolumeConfiguration as _VolumeConfiguration,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
1212,
2438,
318,
8295,
7560,
422,
14673,
22829,
62,
9830,
13,
8189,
62,
8612,
1352,
13,
834,
15003,
834,
13,
9078,
14750,
13,
198,
37811,
198,
198,
11... | 3.093677 | 427 |
#!/usr/bin/python
import argparse
from . import *
if __name__ == "__main__":
    # CLI entry point: persist credentials and optionally send one message.
    arg_parser = argparse.ArgumentParser(
        description="A simple usage of the Telegram Bot API.", allow_abbrev=True
    )
    arg_parser.add_argument("--chat_id", type=str, help="sets the chat_id in settings")
    arg_parser.add_argument("--token", type=str, help="sets the bot token in settings")
    arg_parser.add_argument(
        "--message", type=str, help="specifies the message to send to chat"
    )
    args = arg_parser.parse_args()
    # set_config_options / validate_config / get_config / send_message and
    # the error types come from the package via `from . import *` above.
    set_config_options(chat_id=args.chat_id, token=args.token)
    if args.message:
        if not validate_config(get_config()):
            raise InvalidConfigError(
                "Settings not valid. Use --token and --chat_id options to set settings entries."
            )
        print(send_message(args.message))
    elif args.message == "":
        # argparse yields None when --message is absent and "" when it is
        # passed empty; only the explicit empty string is an error.
        raise EmptyMessageError("Cannot use an empty string with --message option.")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
1822,
29572,
198,
6738,
764,
1330,
1635,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
1822,
62,
48610,
796,
1822,
29572,
13,
28100,
1713,... | 2.642077 | 366 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import six
import uuid
from oslo_serialization import jsonutils
from oslo_utils import encodeutils
import prettytable
from karborclient.common.apiclient import exceptions
# Decorator for cli-args
def env(*var_names, **kwargs):
    """Return the value of the first defined environment variable.

    Scans *var_names* in order and returns the first one set to a
    non-empty value; otherwise returns kwargs['default'] (or '').
    """
    candidates = (os.environ.get(name) for name in var_names)
    return next((value for value in candidates if value),
                kwargs.get('default', ''))
def print_list(objs, fields, exclude_unavailable=False, formatters=None,
               sortby_index=0):
    '''Prints a list of objects as a prettytable.

    @param objs: Objects to print
    @param fields: Fields on each object to be printed
    @param exclude_unavailable: Boolean to decide if unavailable fields are
                                removed
    @param formatters: Custom field formatters
    @param sortby_index: Results sorted against the key in the fields list at
                         this index; if None then the object order is not
                         altered

    NOTE: when exclude_unavailable is set, missing fields are removed from
    the caller's `fields` list in place (side effect).
    '''
    formatters = formatters or {}
    # Fields whose attribute lookup keeps camelCase (spaces replaced only,
    # no lower-casing).
    mixed_case_fields = ['serverId']
    removed_fields = []
    rows = []
    for o in objs:
        row = []
        for field in fields:
            if field in removed_fields:
                continue
            if field in formatters:
                row.append(formatters[field](o))
            else:
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                if type(o) == dict and field in o:
                    data = o[field]
                else:
                    if not hasattr(o, field_name) and exclude_unavailable:
                        # First object missing the attribute disables the
                        # whole column.
                        removed_fields.append(field)
                        continue
                    else:
                        data = getattr(o, field_name, '')
                if data is None:
                    data = '-'
                # Carriage returns break prettytable's layout.
                if isinstance(data, six.string_types) and "\r" in data:
                    data = data.replace("\r", " ")
                row.append(data)
        rows.append(row)
    # Drop any fields found unavailable (mutates the caller's list).
    for f in removed_fields:
        fields.remove(f)
    pt = prettytable.PrettyTable((f for f in fields), caching=False)
    pt.align = 'l'
    for row in rows:
        pt.add_row(row)
    if sortby_index is None:
        order_by = None
    else:
        order_by = fields[sortby_index]
    # _print is defined elsewhere in this module; presumably it renders the
    # table sorted by `order_by` -- confirm.
    _print(pt, order_by)
def dict_prettyprint(val):
    """dict pretty print formatter.

    :param val: dict to render.
    :return: indented, key-sorted JSON string.
    """
    return jsonutils.dumps(val, indent=2, sort_keys=True)
def json_prettyprint(val):
    """json pretty print formatter.

    :param val: JSON string (may be None or empty).
    :return: indented, key-sorted JSON string, or the falsy input
        unchanged.
    """
    if not val:
        # Preserve None / '' as-is, mirroring the original short-circuit.
        return val
    return jsonutils.dumps(jsonutils.loads(val), indent=2, sort_keys=True)
def find_resource(manager, name_or_id, *args, **kwargs):
    """Helper for the _find_* methods.

    Resolves `name_or_id` against `manager` by trying, in order:
    integer id, UUID, then name lookup.

    :raises exceptions.CommandError: when nothing matches.
    """
    # first try to get entity as integer id
    try:
        if isinstance(name_or_id, int) or name_or_id.isdigit():
            return manager.get(int(name_or_id), *args, **kwargs)
    except exceptions.NotFound:
        pass
    # now try to get entity as uuid
    try:
        uuid.UUID(str(name_or_id))
        return manager.get(name_or_id, *args, **kwargs)
    # ValueError: not a UUID at all; NotFound: valid UUID, no such entity
    except (ValueError, exceptions.NotFound):
        pass
    # finally try to find entity by name
    try:
        return manager.find(name=name_or_id)
    except exceptions.NotFound:
        msg = "No %s with a name or ID of '%s' exists." % \
            (manager.resource_class.__name__.lower(), name_or_id)
        raise exceptions.CommandError(msg)
| [
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
220,
220,
... | 2.297283 | 1,951 |
import os.path as osp
import numpy as np
from scipy.io import loadmat
from .base import BaseDataset
| [
11748,
28686,
13,
6978,
355,
267,
2777,
201,
198,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
6738,
629,
541,
88,
13,
952,
1330,
3440,
6759,
201,
198,
201,
198,
6738,
764,
8692,
1330,
7308,
27354,
292,
316,
201,
198,
201,
198... | 2.55814 | 43 |
from unittest import TestCase
from DeckofCards import Deck
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
6738,
20961,
1659,
34,
1371,
1330,
20961,
628
] | 3.75 | 16 |
##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Structured Text Renderer Classes
"""
__docformat__ = 'restructuredtext'
import re
from zope.component import adapter
from zope.interface import implementer
from zope.structuredtext.document import Document
from zope.structuredtext.html import HTML
from zope.publisher.browser import BrowserView
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.app.renderer.i18n import ZopeMessageFactory as _
from zope.app.renderer.interfaces import ISource, IHTMLRenderer
from zope.app.renderer import SourceFactory
class IStructuredTextSource(ISource):
"""Marker interface for a structured text source. Note that an
implementation of this interface should always derive from unicode or
behave like a unicode class."""
StructuredTextSourceFactory = SourceFactory(
IStructuredTextSource, _("Structured Text (STX)"),
_("Structured Text (STX) Source"))
@implementer(IHTMLRenderer)
@adapter(IStructuredTextSource, IBrowserRequest)
class StructuredTextToHTMLRenderer(BrowserView):
r"""A view to convert from Plain Text to HTML.
Example::
>>> from zope.app.renderer import text_type
>>> from zope.publisher.browser import TestRequest
>>> source = StructuredTextSourceFactory(u'This is source.')
>>> renderer = StructuredTextToHTMLRenderer(source, TestRequest())
>>> rendered = renderer.render()
>>> isinstance(rendered, text_type)
True
>>> print(rendered)
<p>This is source.</p>
<BLANKLINE>
Make sure that unicode works as well::
>>> source = StructuredTextSourceFactory(u'This is \xc3\x9c.')
>>> renderer = StructuredTextToHTMLRenderer(source, TestRequest())
>>> rendered = renderer.render()
>>> isinstance(rendered, text_type)
True
>>> print(rendered)
<p>This is ...</p>
<BLANKLINE>
"""
def render(self):
"See zope.app.interfaces.renderer.IHTMLRenderer"
doc = Document()(self.context)
html = HTML()(doc)
# strip html & body added by some zope versions
html = re.sub(
r'(?sm)^<html.*<body.*?>\n(.*)</body>\n</html>\n',
r'\1',
html)
return html
| [
29113,
29113,
7804,
4242,
2235,
198,
2,
198,
2,
15069,
357,
66,
8,
5816,
1168,
3008,
5693,
290,
25767,
669,
13,
198,
2,
1439,
6923,
33876,
13,
198,
2,
198,
2,
770,
3788,
318,
2426,
284,
262,
8617,
286,
262,
1168,
3008,
5094,
13789... | 2.975866 | 953 |
import unittest
import math
import numpy as np
from serializer import Serializer
if __name__ == '__main__':
unittest.main() | [
11748,
555,
715,
395,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
11389,
7509,
1330,
23283,
7509,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
... | 3.047619 | 42 |
import pytest
from tornado import gen
@pytest.mark.gen_test
@pytest.mark.gen_test(run_sync=False)
| [
11748,
12972,
9288,
198,
6738,
33718,
1330,
2429,
628,
198,
31,
9078,
9288,
13,
4102,
13,
5235,
62,
9288,
628,
198,
31,
9078,
9288,
13,
4102,
13,
5235,
62,
9288,
7,
5143,
62,
27261,
28,
25101,
8,
198
] | 2.684211 | 38 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from otwstest.schema.tree_of_life.node_info import validate
from otwstest import all_api_versions, not_v2_version
@all_api_versions
@all_api_versions
@not_v2_version
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
6738,
267,
4246,
301,
395,
13,
15952,
2611,
13,
21048,
62,
1659,
62,
6042,
13,
17440,
62,
10951,
1330,
26571,
19... | 2.460674 | 89 |
"""
Defaults for config object which is used as a singleton object in project
"""
from yacs.config import CfgNode as CN
_C = CN()
_C.EXPERIMENT = CN()
_C.EXPERIMENT.OUTPUT_FOLDER = "./outputs"
_C.EXPERIMENT.TENSORBOARD_FOLDER = "./runs"
_C.EXPERIMENT.RESUME_TRAINING_ON_RESTART = True
_C.EXPERIMENT.NUM_DATALOADER_WORKERS = 0
_C.DATASET = CN()
_C.DATASET.TRAIN_FILE = ""
_C.DATASET.VALID_FILE = ""
_C.DATASET.TOKENIZER_PREFIX_PATH = ""
_C.TRAIN = CN()
_C.TRAIN.LAST_PRETRAINED_MODEL = ''
_C.TRAIN.SAVE_STEPS = 1000
_C.TRAIN.WEIGHT_DECAY = 0.0
_C.TRAIN.NUM_TRAIN_EPOCHS = 3
_C.TRAIN.LOG_EVERY = 100
_C.TRAIN.LEARNING_RATE = 5e-5
_C.TRAIN.NUM_WARMUP_STEPS = 0
_C.TRAIN.BATCH_SIZE = 32
_C.TRAIN.USE_FP_16 = False
_C.TRAIN.MAX_TRAIN_STEPS = 1000000000000
_C.TRAIN.EVAL_STEPS = 1000
_C.TRAIN.GRADIENT_ACCUMULATION_STEPS = 1
_C.TRAIN.LR_SCHEDULER_TYPE = "linear"
_C.TRAIN.USE_GRADUAL_UNFREEZING = False
_C.TRAIN.UNFREEZING = CN()
_C.TRAIN.UNFREEZING.UNFREEZE_GROUPS = None
_C.TRAIN.UNFREEZING.TRAIN_STEPS_LIST = []
_C.TRAIN.UNFREEZING.LEARNING_RATE_LIST = []
_C.TRAIN.GPT2_PRETRAINED_MODEL = 'gpt2'
cfg = _C # global singleton object
| [
37811,
198,
7469,
13185,
329,
4566,
2134,
543,
318,
973,
355,
257,
2060,
1122,
2134,
287,
1628,
198,
37811,
198,
198,
6738,
331,
16436,
13,
11250,
1330,
327,
40616,
19667,
355,
31171,
198,
198,
62,
34,
796,
31171,
3419,
198,
198,
62,
... | 1.926995 | 589 |
#!/usr/bin/env python
"""
FUSE-based client for Seafile
- written by Dongsu Park <dpark@posteo.net>
(inspired by copy-fuse <https://github.com/copy-app/copy-fuse>)
A simple client for seafile.com, implemented via FUSE.
This tool allows a Linux/MacOSX client to mount a seafile cloud drive on a
local filesystem.
Quickstart usage:
$ mkdir -p /mnt/seafile
$ ./seafilefuse.py "http://127.0.0.1:8000" test@seafiletest.com "testtest" /mnt/seafile
(where server URL is "http://127.0.0.1:8000", username is test@seafiletest.com,
and password is "testtest".)
To unmount it:
$ fusermount -u /mnt/seafile
"""
from errno import ENOENT, EIO
from stat import S_IFDIR, S_IFREG
from sys import argv, exit, stderr
import os
import argparse
import tempfile
import time
import hashlib
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn
from seafileapi.client import SeafileApiClient
from seafileapi.exceptions import ClientHttpError, DoesNotExist
from seafileapi.files import SeafDir, SeafFile
from seafileapi.repo import Repo
from seafileapi.repos import Repos
# global configurable variables to be connected to a Seafile server.
sf_server_url="http://127.0.0.1:8000"
sf_username="test@seafiletest.com"
sf_password="testtest"
sf_mount_point="/mnt/seafile"
repo_id_len=36
cache_ttl=10
class SeafileCache:
"""class for handling caches of file attributes as well as expiration time.
SeafileCache instances must be initialized by SeafileFUSE.
"""
def add_attrcache(self, pdirpath, filename, isdir=False, size=0):
"""adds a new cache entry to self.attrcache, no matter if the entry for
the path already exists.
"""
if isdir:
ftype = 'dir'
else:
ftype = 'file'
self.attrcache[pdirpath][filename] = \
{'name': filename, 'type': ftype, 'size': size, 'ctime': time.time(), 'mtime': time.time()}
def update_attrcache(self, pdirpath, filename, isdir=False, size=0):
"""update an existing cache entry in self.attrcache, only if it
already exists for the path as a key.
"""
if pdirpath in self.attrcache:
self.add_attrcache(pdirpath, filename, isdir, size)
class SeafileFUSE(LoggingMixIn, Operations):
"""Main class of the seafile client filesystem based on FUSE.
On initialization, basic connections are established via SeafileApiClient.
Only one seafile repository is to be selected for further operations.
SeafileCache instance must be initialized from the init method as well.
"""
# Disable unused operations:
access = None
chmod = None
chown = None
getxattr = None
listxattr = None
opendir = None
releasedir = None
statfs = None
if __name__ == "__main__":
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
220,
376,
19108,
12,
3106,
5456,
329,
49967,
576,
198,
220,
220,
532,
3194,
416,
28831,
2385,
3250,
1279,
67,
20928,
31,
7353,
68,
78,
13,
3262,
29,
198,
220,
357,
... | 2.730019 | 1,026 |
#!/usr/bin/env python3
import rich.markup
from Crypto.PublicKey import RSA
import pwncat
from pwncat.facts import PrivateKey
from pwncat.modules import Status
from pwncat.platform.linux import Linux
from pwncat.modules.enumerate import Schedule, EnumerateModule
class Module(EnumerateModule):
"""
Search the victim file system for configuration files which may
contain private keys. This uses a regular expression based search
to find files whose contents look like a SSH private key.
"""
PROVIDES = ["creds.private_key"]
PLATFORM = [Linux]
SCHEDULE = Schedule.PER_USER
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
5527,
13,
4102,
929,
198,
6738,
36579,
13,
15202,
9218,
1330,
42319,
198,
198,
11748,
279,
675,
9246,
198,
6738,
279,
675,
9246,
13,
37473,
1330,
15348,
9218,
198,
6738... | 3.298913 | 184 |
import requests
from flask import make_response
from flask import render_template
from flask import Blueprint
from base.views.api.api_strain import get_isotypes, query_strains
from base.constants import DATASET_RELEASE, RELEASES
from base.models2 import strain_m
from base.utils.gcloud import list_release_files
from logzero import logger
data_bp = Blueprint('data',
__name__,
template_folder='data')
#
# Data Page
#
@data_bp.route('/release/latest')
@data_bp.route('/release/<string:selected_release>')
@data_bp.route('/release/<string:selected_release>')
def data(selected_release=DATASET_RELEASE):
"""
Default data page - lists
available releases.
"""
title = "Releases"
strain_listing = query_strains(release=selected_release)
# Fetch variant data
url = "https://storage.googleapis.com/elegansvariation.org/releases/{selected_release}/multiqc_bcftools_stats.json".format(selected_release=selected_release)
vcf_summary = requests.get(url).json()
release_summary = strain_m.release_summary(selected_release)
try:
phylo_url = list_release_files(f"releases/{DATASET_RELEASE}/popgen/trees/genome.pdf")[0]
except IndexError:
pass
VARS = {'title': title,
'strain_listing': strain_listing,
'vcf_summary': vcf_summary,
'phylo_url': phylo_url,
'RELEASES': RELEASES,
'release_summary': release_summary,
'selected_release': selected_release,
'wormbase_genome_version': dict(RELEASES)[selected_release]}
return render_template('data.html', **VARS)
#
# Download Script
#
@data_bp.route('/download/download_bams.sh')
#
# Browser
#
@data_bp.route('/browser/')
@data_bp.route('/browser/<region>')
@data_bp.route('/browser/<region>/<query>')
| [
11748,
7007,
198,
6738,
42903,
1330,
787,
62,
26209,
198,
6738,
42903,
1330,
8543,
62,
28243,
198,
6738,
42903,
1330,
39932,
198,
6738,
2779,
13,
33571,
13,
15042,
13,
15042,
62,
2536,
391,
1330,
651,
62,
271,
13567,
11,
12405,
62,
25... | 2.467828 | 746 |
# -*- coding: utf-8 -*-
import os
import platform
import site
if platform.system() == "Windows":
extlib_path = 'extlibs_windows'
if platform.system() == "Darwin":
extlib_path = 'extlibs_darwin'
if platform.system() == "Linux":
extlib_path = 'extlibs_linux'
site.addsitedir(os.path.abspath(os.path.join(os.path.dirname(__file__), extlib_path)))
# noinspection PyPep8Naming
def classFactory(iface): # pylint: disable=invalid-name
"""Instantiates Google Earth Engine Plugin.
:param iface: A QGIS interface instance.
:type iface: QgsInterface
"""
#
from .ee_plugin import GoogleEarthEnginePlugin
return GoogleEarthEnginePlugin(iface)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
28686,
198,
11748,
3859,
198,
11748,
2524,
198,
198,
361,
3859,
13,
10057,
3419,
6624,
366,
11209,
1298,
198,
220,
220,
220,
1070,
8019,
62,
6978,
796,
705,
2302,... | 2.679842 | 253 |
#!/usr/bin/env python
"""Sends configuration difference to Webex Teams, formerly Spark.
Compares the current running configuration and the saved running
configuration, creates diff and sends it to a Webex Teams room.
Should be invoked after every configuration change by EEM.
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import re
from cli import cli, clip
from ciscosparkapi import CiscoSparkAPI
BACKUP_CONFIG_IOS_PATH = 'flash:/running-config.bak'
def send_syslog(message):
"""Sends a syslog message to the device with severity 6
Args:
message (str): message to be sent
Returns:
None
"""
cli(
'send log facility PYTHON severity 6 mnemonics CONF_DIFF '
'{message}'.format(message=message)
)
def convert_ios_path_to_linux(path):
"""Convert the file path valid in IOS to the correct path in Guest Shell.
Example:
>>> convert_ios_path_to_linux('flash:/running-config.bak')
'/flash/running-config.bak'
Args:
path(str): the path valid in IOS.
Should contain filesystem, otherwise 'flash' is assumed
Returns:
string, the converted path which is valid in the Guest Shell
"""
path_components = os.path.normpath(path).split(os.sep)
file_system = path_components[0]
if ':' in file_system:
file_system = file_system.strip(':')
path_components = path_components[1:]
else:
file_system = 'flash'
result_path = os.path.join(os.sep, file_system, *path_components)
return result_path
def save_config_to_ios_file(backup_config_ios_path):
"""Saves the current running configuration locally to the filesystem
Args:
backup_config_ios_path (str): IOS path to the backup configuration
Returns:
None
"""
# MISSION TODO 3: replace with the function that runs IOS commands and
# returns output instead of printing it
MISSION('copy running-config {}\n'.format(backup_config_ios_path))
# END MISSION SECTION 3
message = (
'Running configuration was saved to {}'.format(backup_config_ios_path)
)
print(message)
send_syslog(message)
def get_config_diff(backup_config_ios_path):
"""Gets configuration difference using `show archive config diff` command
Args:
backup_config_ios_path (str): IOS path to the backup configuration
Returns:
list of lines containing config difference
"""
config_diff = cli(
'show archive config diff {} system:running-config'.format(
backup_config_ios_path
)
)
backup_config_linux_path = convert_ios_path_to_linux(
backup_config_ios_path
)
os.remove(backup_config_linux_path)
save_config_to_ios_file(backup_config_ios_path)
if re.search('No changes were found', config_diff):
return None
else:
# split lines by \r\n into a list
config_diff_lines = re.split(r'\r?\n', config_diff)
return config_diff_lines
def form_spark_message(config_diff_lines):
"""Creates a Spark message formatted in markdown based on config diff
Args:
config_diff_lines (list): list of lines containing config
difference
Returns:
str: markdown Spark message as a string
"""
message = (
'Configuration differences between '
'the running config and the last backup:\n'
'```\n'
'{}\n'
'```\n'
'I\'ve completed **Introduction to '
'Guest Shell** Mission!'.format('\n'.join(config_diff_lines))
)
return message
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
50,
2412,
8398,
3580,
284,
5313,
1069,
24690,
11,
15734,
17732,
13,
198,
198,
7293,
3565,
262,
1459,
2491,
8398,
290,
262,
7448,
2491,
198,
11250,
3924,
11,
8075,
814,
290,
1280... | 2.842169 | 1,641 |
import os
import pandas as pd
here = os.path.abspath(os.path.dirname(__file__))
zarr_cat_pangeo_cmip6 = 'https://storage.googleapis.com/cmip6/pangeo-cmip6.json'
cdf_cat_sample_cmip6 = os.path.join(here, 'sample-catalogs/cmip6-netcdf.json')
multi_variable_cat = os.path.join(here, 'sample-catalogs/multi-variable-catalog.json')
cdf_cat_sample_cmip5 = os.path.join(here, 'sample-catalogs/cmip5-netcdf.json')
cdf_cat_sample_cesmle = os.path.join(here, 'sample-catalogs/cesm1-lens-netcdf.json')
catalog_dict_records = os.path.join(here, 'sample-catalogs/catalog-dict-records.json')
zarr_cat_aws_cesm = (
'https://raw.githubusercontent.com/NCAR/cesm-lens-aws/master/intake-catalogs/aws-cesm1-le.json'
)
mixed_cat_sample_cmip6 = os.path.join(here, 'sample-catalogs/cmip6-bcc-mixed-formats.json')
sample_df = pd.DataFrame(
[
{
'component': 'atm',
'frequency': 'daily',
'experiment': '20C',
'variable': 'FLNS',
'path': 's3://ncar-cesm-lens/atm/daily/cesmLE-20C-FLNS.zarr',
'format': 'zarr',
},
{
'component': 'atm',
'frequency': 'daily',
'experiment': '20C',
'variable': 'FLNSC',
'path': 's3://ncar-cesm-lens/atm/daily/cesmLE-20C-FLNSC.zarr',
'format': 'zarr',
},
]
)
sample_esmcat_data = {
'esmcat_version': '0.1.0',
'id': 'aws-cesm1-le',
'description': '',
'catalog_file': '',
'attributes': [],
'assets': {'column_name': 'path', 'format': 'zarr'},
'aggregation_control': {
'variable_column_name': 'variable',
'groupby_attrs': ['component', 'experiment', 'frequency'],
'aggregations': [
{'type': 'union', 'attribute_name': 'variable', 'options': {'compat': 'override'}}
],
},
}
sample_esmcat_data_without_agg = {
'esmcat_version': '0.1.0',
'id': 'aws-cesm1-le',
'description': '',
'catalog_file': '',
'attributes': [],
'assets': {'column_name': 'path', 'format': 'zarr'},
}
| [
11748,
28686,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
1456,
796,
28686,
13,
6978,
13,
397,
2777,
776,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
4008,
198,
89,
3258,
62,
9246,
62,
79,
858,
78,
62,
11215,
... | 2.014663 | 1,023 |
# we can use Segment Tree 24 and Segment Tree 31 and combine them.
if __name__ == '__main__':
n, m = [int(i) for i in input().split()]
STree1 = SegmentTree1(n)
STree2 = SegmentTree2(n, lambda a, b, x: a + b*x, lambda a, b: a+b, 0)
arr = [int(i) for i in input().split()]
for i in range(n):
STree1.update(i, i+1, arr[i]*(i+1), 0) # not optimal way
STree2.update(i, i+1, arr[i])
print(STree1.T)
print(STree2.T)
for i in range(m):
t = [int(i) for i in input().split()]
if t[0] == 1:
STree1.update(t[1] - 1, t[2], t[3]*t[1], t[3])
STree2.update(t[1] - 1, t[2], t[3])
#print(STree1.T)
#print(STree2.T)
else:
m1 = STree1.query(t[1] - 1, t[2])
m2 = STree2.query(t[1] - 1, t[2])
print(m1 - m2*(t[1]-1))
#print(m1, m2) | [
2,
356,
460,
779,
1001,
5154,
12200,
1987,
290,
1001,
5154,
12200,
3261,
290,
12082,
606,
13,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
299,
11,
285,
796,
685,
600,
7,
72,
8,
329,... | 1.734774 | 509 |
summary = d = {}
d['lig0'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig10'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig11'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig12'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig13'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig14'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig15'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig16'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig17'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig18'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig19'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig1'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig20'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig21'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig22'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig23'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig24'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig25'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig26'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig27'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig28'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig29'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig2'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig30'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig31'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig32'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig33'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig34'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig35'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig36'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig37'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig38'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig39'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig3'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig40'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig41'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig42'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig43'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig44'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig45'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig46'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig0'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig10'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig11'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig12'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig13'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig14'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig15'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig16'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig17'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig18'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig19'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig1'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig20'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig21'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig22'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig23'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig24'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig25'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig26'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig27'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig28'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig29'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig2'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig30'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig31'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig32'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig33'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig34'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig35'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig36'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig37'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig38'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig39'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig3'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig40'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig41'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig42'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig43'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig44'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig45'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig46'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig47'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig48'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig49'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig4'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig5'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig6'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig7'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig8'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig9'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig0'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig100'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig101'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig102'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig103'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig104'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig105'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig106'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig107'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig108'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig109'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig10'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig110'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig111'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig112'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig113'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig114'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig115'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig116'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig117'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig118'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig119'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig11'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig120'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig121'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig122'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig123'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig124'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig125'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig126'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig127'] = {'atom_types': ['A', 'Br', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig128'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig129'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig12'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig130'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig131'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig132'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig133'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig134'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig135'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig136'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig137'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig138'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig139'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig13'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig140'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig141'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig142'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig143'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig144'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig145'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig146'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig147'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig148'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig149'] = {'atom_types': ['A', 'C', 'N', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig14'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig150'] = {'atom_types': ['A', 'C', 'N', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig151'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig152'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig153'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig154'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig155'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig156'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig157'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig158'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig159'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig15'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig160'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig161'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig162'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig163'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig164'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig165'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig166'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig167'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig168'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig169'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig16'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig170'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig171'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig172'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig173'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig174'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig175'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig176'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig177'] = {'atom_types': ['A', 'Br', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig178'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig179'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig17'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig180'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig181'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig182'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig183'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig184'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig185'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig186'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig187'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig188'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig189'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig18'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig190'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig191'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig192'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig193'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig194'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig195'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig196'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig197'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig198'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig199'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig19'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig1'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig200'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig201'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig202'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig203'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig204'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig205'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig206'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig207'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig208'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig209'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig20'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig210'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig211'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig212'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig213'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig214'] = {'atom_types': ['A', 'C', 'NA', 'OA', 'S' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig215'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig216'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig217'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig218'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig219'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig21'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig220'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig221'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig222'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig223'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig224'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig225'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig226'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig227'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig228'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig229'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig22'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig230'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig231'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig232'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig233'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig234'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig235'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig236'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig237'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig238'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig239'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig23'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig240'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig241'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig242'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig243'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig244'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig245'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig246'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig247'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig248'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig249'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig24'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig250'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig251'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig252'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig253'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig254'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig255'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig256'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig257'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig258'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig259'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig25'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig260'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig261'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig262'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig263'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig264'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig265'] = {'atom_types': ['A', 'C', 'N', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig266'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig267'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig268'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig269'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig26'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig270'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig271'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig272'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig273'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig274'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig275'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig276'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig277'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig278'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig279'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig27'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig280'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig281'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig282'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig283'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig284'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig285'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig286'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig287'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig288'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig289'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig28'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig290'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig291'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig292'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig293'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig294'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig295'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig296'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig297'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig298'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig299'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig29'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig2'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig300'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig301'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig302'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig303'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig304'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig305'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig306'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig307'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig308'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig309'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig30'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig310'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig311'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig312'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig313'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig314'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig315'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig316'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig317'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig318'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig319'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig31'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig320'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig321'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig322'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig323'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig324'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig325'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig326'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig327'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig328'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig329'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig32'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig330'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig331'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig332'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig333'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig334'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA', 'S' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig335'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig336'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig337'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig338'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig339'] = {'atom_types': ['A', 'C', 'N', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig33'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig340'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig341'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig342'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig343'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'S' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig344'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig345'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig346'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig347'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig348'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig349'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig34'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig350'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig351'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig352'] = {'atom_types': ['A', 'C', 'HD', 'NA', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig353'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig354'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig355'] = {'atom_types': ['A', 'C', 'N', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig356'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig357'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig358'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig359'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig35'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig360'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig361'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig362'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig363'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig364'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig365'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig366'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig367'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig368'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig369'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig36'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig370'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig371'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig372'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig373'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig374'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig375'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig376'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig377'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig378'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig379'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig37'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig380'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig381'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig382'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig383'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig384'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig385'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig386'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig387'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig388'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig389'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig38'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig390'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig391'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig392'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig393'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig394'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig395'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig396'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig397'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig398'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig399'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig39'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig3'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig400'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig401'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig402'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig403'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig404'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig405'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig406'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig407'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig408'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig409'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
# Ligand parameter table (auto-generated data).
# Each entry maps a ligand id to:
#   'atom_types'  : atom-type codes present in the ligand (AutoDock-style
#                   codes: A, Br, C, F, HD, N, NA, OA, S, SA — TODO confirm
#                   against the docking setup that consumes this table)
#   'rbonds'      : rotatable-bond count (presumably; verify with consumer)
#   'zero_charge' : atoms forced to zero partial charge (none for these)
# The compact (name, atom_types, rbonds) rows below are expanded into the
# exact per-ligand dicts the original repeated literal assignments produced.
_LIG_TABLE = [
    ('lig40', ['C', 'N', 'OA'], 4),
    ('lig410', ['A', 'Br', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig411', ['A', 'C', 'HD', 'N', 'OA', 'S'], 5),
    ('lig412', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 6),
    ('lig413', ['C', 'HD', 'N', 'OA'], 3),
    ('lig414', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig415', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig416', ['C', 'N', 'OA'], 3),
    ('lig417', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig418', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig419', ['A', 'C', 'HD', 'N', 'OA', 'S'], 5),
    ('lig41', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig420', ['A', 'C', 'N', 'NA', 'OA'], 4),
    ('lig421', ['A', 'C', 'N', 'NA', 'OA'], 5),
    ('lig422', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig423', ['A', 'C', 'N', 'NA', 'OA'], 6),
    ('lig424', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig425', ['A', 'C', 'HD', 'N', 'OA'], 4),
    ('lig426', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig427', ['A', 'Br', 'C', 'HD', 'N', 'OA'], 6),
    ('lig428', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig429', ['A', 'C', 'HD', 'N', 'OA', 'SA'], 5),
    ('lig42', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig430', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig431', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig432', ['A', 'C', 'N', 'OA', 'SA'], 5),
    ('lig433', ['A', 'C', 'N', 'OA', 'S'], 5),
    ('lig434', ['C', 'N', 'OA'], 2),
    ('lig435', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 2),
    ('lig436', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig437', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig438', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig439', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig43', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig440', ['C', 'N', 'OA'], 3),
    ('lig441', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig442', ['A', 'C', 'N', 'NA', 'OA'], 6),
    ('lig443', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig444', ['A', 'C', 'HD', 'N', 'OA'], 3),
    ('lig445', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig446', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig447', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig448', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig449', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig44', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig450', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig451', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig452', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig453', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig454', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig455', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig456', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig457', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 7),
    ('lig458', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig459', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig45', ['A', 'Br', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig460', ['A', 'Br', 'C', 'N', 'OA'], 5),
    ('lig461', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig462', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig463', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig464', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig465', ['A', 'C', 'HD', 'N', 'OA'], 3),
    ('lig466', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig467', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig468', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig469', ['A', 'C', 'F', 'N', 'OA'], 5),
    ('lig46', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig470', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig471', ['A', 'C', 'F', 'HD', 'N', 'OA'], 6),
    ('lig472', ['C', 'N', 'OA'], 3),
    ('lig473', ['C', 'HD', 'N', 'OA'], 5),
    ('lig474', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig475', ['C', 'N', 'OA'], 3),
    ('lig476', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 7),
    ('lig477', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig478', ['A', 'C', 'F', 'HD', 'N', 'OA'], 6),
    ('lig479', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig47', ['C', 'HD', 'N', 'OA'], 2),
    ('lig480', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig481', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig482', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 7),
    ('lig483', ['A', 'C', 'F', 'N', 'OA'], 5),
    ('lig484', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig485', ['A', 'C', 'N', 'OA'], 4),
    ('lig486', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig487', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig488', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig489', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig48', ['C', 'HD', 'N', 'OA'], 3),
    ('lig490', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 5),
    ('lig491', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig492', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig493', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig494', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig495', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 6),
    ('lig496', ['A', 'C', 'HD', 'N', 'OA', 'SA'], 7),
    ('lig497', ['A', 'C', 'NA', 'OA', 'S'], 6),
    ('lig498', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig499', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig49', ['A', 'C', 'N', 'NA', 'OA'], 4),
    ('lig4', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig500', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig501', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig502', ['A', 'C', 'HD', 'N', 'OA'], 4),
    ('lig503', ['C', 'N', 'OA'], 4),
    ('lig504', ['A', 'C', 'N', 'OA'], 5),
    ('lig505', ['A', 'C', 'N', 'NA', 'OA'], 5),
    ('lig506', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig507', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig508', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig509', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 5),
    ('lig50', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig510', ['C', 'HD', 'N', 'OA'], 3),
    ('lig511', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig512', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig513', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig514', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig515', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 6),
    ('lig516', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig517', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig518', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig519', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig51', ['A', 'C', 'F', 'HD', 'N', 'OA', 'S'], 4),
    ('lig520', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig521', ['A', 'C', 'HD', 'N', 'OA', 'SA'], 4),
    ('lig522', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig523', ['A', 'Br', 'C', 'HD', 'N', 'OA'], 4),
    ('lig524', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig525', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig526', ['C', 'HD', 'N', 'OA'], 3),
    ('lig527', ['A', 'C', 'F', 'HD', 'N', 'OA'], 2),
    ('lig528', ['A', 'C', 'N', 'OA'], 3),
    ('lig529', ['A', 'C', 'N', 'OA'], 6),
    ('lig52', ['A', 'C', 'N', 'OA'], 2),
    ('lig530', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig531', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig532', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig533', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig534', ['A', 'C', 'HD', 'N', 'OA', 'SA'], 4),
    ('lig535', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig536', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig537', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig538', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 7),
    ('lig539', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig53', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig540', ['A', 'C', 'HD', 'N', 'OA'], 3),
    ('lig541', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig542', ['A', 'C', 'N', 'OA'], 2),
    ('lig543', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig544', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig545', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig546', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig547', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig548', ['A', 'C', 'N', 'OA', 'SA'], 4),
    ('lig549', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig54', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig550', ['A', 'C', 'HD', 'N', 'OA'], 4),
    ('lig551', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig552', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig553', ['C', 'N', 'OA'], 2),
    ('lig554', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig555', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig556', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig557', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig558', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig559', ['A', 'C', 'HD', 'N', 'OA'], 4),
    ('lig55', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig560', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig561', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig562', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 7),
    ('lig563', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig564', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig565', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig566', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig567', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig568', ['A', 'C', 'N', 'NA', 'OA'], 5),
    ('lig569', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig56', ['A', 'C', 'N', 'OA', 'SA'], 5),
    ('lig570', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig571', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig572', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 6),
    ('lig573', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig574', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig575', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig576', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig577', ['A', 'C', 'N', 'OA'], 4),
    ('lig578', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig579', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig57', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig580', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig581', ['A', 'C', 'N', 'NA', 'OA'], 6),
    ('lig582', ['A', 'C', 'N', 'OA'], 6),
    ('lig583', ['A', 'C', 'N', 'OA'], 5),
    ('lig584', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig585', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig586', ['A', 'C', 'N', 'NA', 'OA'], 5),
    ('lig587', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig588', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig589', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig58', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig590', ['A', 'Br', 'C', 'HD', 'N', 'OA'], 6),
    ('lig591', ['A', 'C', 'F', 'HD', 'N'], 2),
    ('lig592', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig593', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig594', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig595', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig596', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig597', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig598', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig599', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig59', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig5', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig600', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig601', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig602', ['A', 'C', 'N', 'NA', 'OA'], 3),
    ('lig603', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig604', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig605', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig606', ['C', 'N', 'OA'], 4),
    ('lig607', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig608', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig609', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig60', ['A', 'C', 'N', 'NA', 'OA', 'S'], 4),
    ('lig610', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig611', ['A', 'Br', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig612', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig613', ['C', 'HD', 'N', 'OA'], 2),
    ('lig614', ['C', 'HD', 'N', 'OA'], 3),
    ('lig615', ['A', 'C', 'N', 'NA', 'OA'], 4),
    ('lig616', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig617', ['A', 'C', 'F', 'HD', 'N', 'OA', 'S'], 4),
    ('lig618', ['A', 'C', 'N', 'OA'], 2),
    ('lig619', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig61', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig620', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig621', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig622', ['A', 'C', 'N', 'OA', 'SA'], 5),
    ('lig623', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig624', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig625', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig626', ['A', 'C', 'N', 'NA', 'OA', 'S'], 4),
    ('lig627', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig628', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig629', ['A', 'C', 'F', 'HD', 'N'], 4),
    ('lig62', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig630', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig631', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig632', ['A', 'C', 'HD', 'N', 'OA'], 7),
    ('lig633', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig634', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig635', ['A', 'C', 'HD', 'NA', 'OA', 'S'], 5),
    ('lig636', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig637', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig638', ['A', 'C', 'N', 'OA', 'SA'], 5),
    ('lig639', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig63', ['A', 'C', 'F', 'HD', 'N'], 4),
    ('lig640', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig641', ['A', 'C', 'N', 'NA', 'OA'], 6),
    ('lig642', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig643', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig644', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig645', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig646', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig647', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig648', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig649', ['A', 'Br', 'C', 'HD', 'N', 'OA'], 4),
    ('lig64', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig650', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig651', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S', 'SA'], 5),
    ('lig652', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig653', ['A', 'C', 'HD', 'N', 'OA'], 7),
    ('lig654', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig655', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig656', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig657', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 2),
    ('lig658', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 7),
    ('lig659', ['C', 'HD', 'N', 'OA'], 5),
    ('lig65', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig660', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig661', ['C', 'HD', 'N', 'OA'], 7),
    ('lig662', ['A', 'C', 'N', 'NA', 'OA'], 5),
    ('lig663', ['A', 'C', 'HD', 'N', 'OA'], 7),
    ('lig664', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig665', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 7),
    ('lig666', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig667', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig668', ['A', 'C', 'HD', 'N', 'OA'], 2),
    ('lig669', ['A', 'C', 'N', 'OA'], 5),
    ('lig66', ['A', 'C', 'HD', 'N', 'OA'], 7),
    ('lig670', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig671', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig672', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 5),
    ('lig673', ['A', 'C', 'F', 'HD', 'N', 'OA'], 2),
    ('lig674', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig675', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig676', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig677', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig678', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig679', ['A', 'C', 'N', 'NA', 'OA'], 6),
    ('lig67', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig680', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig681', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig682', ['A', 'C', 'F', 'HD', 'N', 'OA'], 2),
    ('lig683', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig684', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig685', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 7),
    ('lig686', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig687', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig688', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 2),
    ('lig689', ['A', 'C', 'N', 'NA', 'OA'], 3),
    ('lig68', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig690', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig691', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig692', ['C', 'HD', 'N', 'OA'], 4),
    ('lig693', ['A', 'Br', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig694', ['A', 'C', 'HD', 'N', 'OA', 'S'], 5),
    ('lig695', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 6),
    ('lig696', ['C', 'HD', 'N', 'OA'], 3),
    ('lig697', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig698', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig699', ['C', 'N', 'OA'], 3),
    ('lig69', ['A', 'C', 'HD', 'NA', 'OA', 'S'], 5),
    ('lig6', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 6),
    ('lig700', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig701', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig702', ['A', 'C', 'HD', 'N', 'OA', 'S'], 5),
    ('lig703', ['A', 'C', 'N', 'NA', 'OA'], 4),
    ('lig704', ['A', 'C', 'N', 'NA', 'OA'], 5),
    ('lig705', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig706', ['A', 'C', 'N', 'NA', 'OA'], 6),
    ('lig707', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig708', ['A', 'C', 'HD', 'N', 'OA'], 4),
    ('lig709', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig70', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig710', ['A', 'Br', 'C', 'HD', 'N', 'OA'], 6),
    ('lig711', ['A', 'C', 'F', 'N', 'OA'], 3),
    ('lig712', ['A', 'C', 'HD', 'N', 'OA', 'SA'], 5),
    ('lig713', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig714', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig715', ['A', 'C', 'N', 'OA', 'SA'], 5),
    ('lig716', ['A', 'C', 'N', 'OA', 'S'], 5),
    ('lig717', ['C', 'N', 'OA'], 2),
    ('lig718', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 2),
    ('lig719', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig71', ['A', 'C', 'N', 'NA', 'OA', 'SA'], 3),
    ('lig720', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig721', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig722', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig723', ['C', 'N', 'OA'], 3),
    ('lig724', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig725', ['A', 'C', 'N', 'NA', 'OA'], 6),
    ('lig726', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig727', ['A', 'C', 'HD', 'N', 'OA'], 3),
    ('lig728', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 6),
    ('lig729', ['A', 'C', 'F', 'HD', 'N', 'OA'], 5),
    ('lig72', ['A', 'C', 'N', 'OA', 'SA'], 5),
    ('lig730', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig731', ['A', 'C', 'F', 'HD', 'N', 'OA'], 3),
    ('lig732', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig733', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 4),
    ('lig734', ['A', 'C', 'F', 'HD', 'N', 'OA'], 4),
    ('lig735', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig736', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig737', ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig738', ['A', 'C', 'HD', 'N', 'OA'], 6),
    ('lig739', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA'], 4),
    ('lig73', ['A', 'C', 'HD', 'N', 'OA'], 5),
    ('lig740', ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S'], 7),
    ('lig741', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 3),
    ('lig742', ['A', 'C', 'HD', 'N', 'NA', 'OA'], 5),
    ('lig743', ['A', 'Br', 'C', 'N', 'OA'], 5),
]
# Expand the compact rows into the per-ligand dicts consumers expect.
# A fresh list is built for each 'zero_charge' so entries never share state.
for _name, _types, _rb in _LIG_TABLE:
    d[_name] = {'atom_types': _types, 'rbonds': _rb, 'zero_charge': []}
# Keep the module namespace clean of generator temporaries.
del _LIG_TABLE, _name, _types, _rb
d['lig744'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig745'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig746'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig747'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig748'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig749'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig74'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig750'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig751'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig752'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig753'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig754'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig755'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig756'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig757'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig758'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig759'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig75'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig760'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig761'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig762'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig763'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig764'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig765'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig766'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig767'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig768'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig769'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig76'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig770'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig771'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig772'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig773'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig774'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig775'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig776'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig777'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig778'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig779'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig77'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig780'] = {'atom_types': ['A', 'C', 'NA', 'OA', 'S' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig781'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig782'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig783'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig784'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig785'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig786'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig787'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig788'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig789'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig78'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig790'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig791'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig792'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig793'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig794'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig795'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig796'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig797'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig798'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig799'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig79'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig7'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig800'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig801'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig802'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig803'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig804'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig805'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig806'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig807'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig808'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig809'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig80'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig810'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig811'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig812'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig813'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig814'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig815'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig816'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig817'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig818'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig819'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig81'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig820'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig821'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig822'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig823'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig824'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig825'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig826'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig827'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig828'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig829'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig82'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig830'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig831'] = {'atom_types': ['A', 'C', 'N', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig832'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig833'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig834'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig835'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig836'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig837'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig838'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig839'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig83'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig840'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig841'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig842'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig843'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig844'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig845'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig846'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig847'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig848'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig84'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig85'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig86'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig87'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig88'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig89'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig8'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig90'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig91'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig92'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig93'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig94'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig95'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig96'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig97'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig98'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig99'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig9'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig0'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig100'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig101'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig102'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig103'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig104'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig105'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig106'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig107'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig108'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig109'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig10'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig110'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig111'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig112'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig113'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig114'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig115'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig116'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig117'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig118'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig119'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig11'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig120'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig121'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig122'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig123'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig124'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig125'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig126'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig127'] = {'atom_types': ['A', 'Br', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig128'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig129'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig12'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig130'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig131'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig132'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig133'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig134'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig135'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig136'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig137'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig138'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig139'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig13'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig140'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig141'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig142'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig143'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig144'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig145'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig146'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig147'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig148'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig149'] = {'atom_types': ['A', 'C', 'N', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig14'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig150'] = {'atom_types': ['A', 'C', 'N', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig151'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig152'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig153'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig154'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig155'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig156'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig157'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig158'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig159'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig15'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig160'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig161'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig162'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig163'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig164'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig165'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig166'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig167'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig168'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig169'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig16'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig170'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig171'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig172'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig173'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig174'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig175'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig176'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig177'] = {'atom_types': ['A', 'Br', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig178'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig179'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig17'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig180'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig181'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig182'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig183'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig184'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig185'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig186'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig187'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig188'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig189'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig18'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig190'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig191'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig192'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig193'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig194'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig195'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig196'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig197'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig198'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig199'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig19'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig1'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig200'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig201'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig202'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig203'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig204'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig205'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig206'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig207'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig208'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig209'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig20'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig210'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig211'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig212'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig213'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig214'] = {'atom_types': ['A', 'C', 'NA', 'OA', 'S' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig215'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig216'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig217'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig218'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig219'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig21'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig220'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig221'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig222'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig223'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig224'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig225'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig226'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig227'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig228'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig229'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig22'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig230'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig231'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig232'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig233'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig234'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig235'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig236'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig237'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig238'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig239'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig23'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig240'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig241'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig242'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig243'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig244'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig245'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig246'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig247'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig248'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig249'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig24'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig250'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig251'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig252'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig253'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig254'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig255'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig256'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig257'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig258'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig259'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig25'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig260'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig261'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig262'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig263'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig264'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig265'] = {'atom_types': ['A', 'C', 'N', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig266'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig267'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig268'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig269'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig26'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig270'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig271'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig272'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig273'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig274'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig275'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig276'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig277'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig278'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig279'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig27'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig280'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig281'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig282'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig28'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig29'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig2'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig30'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig31'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig32'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig33'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig34'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig35'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig36'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig37'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig38'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig39'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig3'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig40'] = {'atom_types': ['C', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig41'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig42'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig43'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig44'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig45'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig46'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig47'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig48'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig49'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig4'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig50'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig51'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA', 'S' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig52'] = {'atom_types': ['A', 'C', 'N', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig53'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig54'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig55'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig56'] = {'atom_types': ['A', 'C', 'N', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig57'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig58'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig59'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig5'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig60'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'S' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig61'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig62'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig63'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig64'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig65'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig66'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig67'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig68'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig69'] = {'atom_types': ['A', 'C', 'HD', 'NA', 'OA', 'S' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig6'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig70'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig71'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig72'] = {'atom_types': ['A', 'C', 'N', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig73'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig74'] = {'atom_types': ['A', 'C', 'F', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig75'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig76'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig77'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig78'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig79'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig7'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig80'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig81'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig82'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig83'] = {'atom_types': ['A', 'Br', 'C', 'HD', 'N', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
d['lig84'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig85'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'S', 'SA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig86'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig87'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig88'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig89'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig8'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':6,
'zero_charge' : [],
}
d['lig90'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig91'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':2,
'zero_charge' : [],
}
d['lig92'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig93'] = {'atom_types': ['C', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig94'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig95'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig96'] = {'atom_types': ['A', 'C', 'N', 'NA', 'OA' ],
'rbonds':5,
'zero_charge' : [],
}
d['lig97'] = {'atom_types': ['A', 'C', 'HD', 'N', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig98'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA', 'SA' ],
'rbonds':3,
'zero_charge' : [],
}
d['lig99'] = {'atom_types': ['A', 'C', 'HD', 'N', 'NA', 'OA' ],
'rbonds':7,
'zero_charge' : [],
}
d['lig9'] = {'atom_types': ['A', 'C', 'F', 'HD', 'N', 'NA', 'OA' ],
'rbonds':4,
'zero_charge' : [],
}
| [
49736,
796,
288,
796,
23884,
198,
67,
17816,
4604,
15,
20520,
796,
1391,
6,
37696,
62,
19199,
10354,
37250,
32,
3256,
705,
34,
3256,
705,
37,
3256,
705,
10227,
3256,
705,
45,
3256,
705,
4535,
3256,
705,
23621,
6,
16589,
198,
197,
19... | 1.815114 | 71,379 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Snips Lights + Homeassistant
# -----------------------------------------------------------------------------
# Copyright 2019 Patrick Fial
# -----------------------------------------------------------------------------
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
# LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import io
import toml
import requests
import logging
from os import environ
from snipsTools import SnipsConfigParser
from hermes_python.hermes import Hermes
from hermes_python.ontology import *
# -----------------------------------------------------------------------------
# global definitions (home assistant service URLs)
# -----------------------------------------------------------------------------
HASS_LIGHTS_ON_SVC = "/api/services/light/turn_on"
HASS_LIGHTS_OFF_SVC = "/api/services/light/turn_off"
HASS_GROUP_ON_SVC = "/api/services/homeassistant/turn_on"
HASS_GROUP_OFF_SVC = "/api/services/homeassistant/turn_off"
HASS_AUTOMATION_ON_SVC = "/api/services/automation/turn_on"
HASS_AUTOMATION_OFF_SVC = "/api/services/automation/turn_off"
APP_ID = "snips-skill-s710-lights"
# -----------------------------------------------------------------------------
# class App
# -----------------------------------------------------------------------------
# -------------------------------------------------------------------------
# ctor
# -----------------------------------------------------------------------------
# read_toml
# -------------------------------------------------------------------------
# start
# -------------------------------------------------------------------------
# on_intent
# -------------------------------------------------------------------------
# done
# -------------------------------------------------------------------------
# params_of
# -----------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
App()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
16529,
32501,
198,
2,
5489,
2419,
22661,
1343,
5995,
562,
10167,
198,
2,
16529,
32501,
198,
2,
15069,
131... | 4.71835 | 703 |
# Generated by Django 3.0.4 on 2020-04-21 21:32
from django.db import migrations
| [
2,
2980,
515,
416,
37770,
513,
13,
15,
13,
19,
319,
12131,
12,
3023,
12,
2481,
2310,
25,
2624,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
628
] | 2.766667 | 30 |
for _ in range(int(input())):
n = input()
if n in {'1', '4', '78'}:
print('+')
elif n.endswith('35'):
print('-')
elif n.startswith('9') and n.endswith('4'):
print('*')
else:
print('?')
| [
1640,
4808,
287,
2837,
7,
600,
7,
15414,
28955,
2599,
198,
220,
220,
220,
299,
796,
5128,
3419,
198,
220,
220,
220,
611,
299,
287,
1391,
6,
16,
3256,
705,
19,
3256,
705,
3695,
6,
38362,
198,
220,
220,
220,
220,
220,
220,
220,
36... | 1.823077 | 130 |
"""Tests the ``gdc_filtration_tools.tools.extract_oxoq`` module.
"""
import sqlite3
import tempfile
import unittest
import attr
from gdc_filtration_tools.tools.extract_oxoq import extract_oxoq_from_sqlite, get_oxoq
from tests.utils import captured_output, cleanup_files
@attr.s
def build_test_schema(conn):
"""
utility to set test schemas.
"""
sql_script = """
DROP TABLE IF EXISTS "picard_CollectOxoGMetrics";
CREATE TABLE "picard_CollectOxoGMetrics" (
TOTAL_BASES TEXT,
ALT_OXO_BASES TEXT,
ALT_NONOXO_BASES TEXT,
OXIDATION_Q TEXT,
CONTEXT TEXT,
input_state TEXT
);
"""
csr = conn.executescript(sql_script)
csr.close()
| [
37811,
51,
3558,
262,
7559,
70,
17896,
62,
69,
36055,
62,
31391,
13,
31391,
13,
2302,
974,
62,
1140,
78,
80,
15506,
8265,
13,
198,
37811,
198,
11748,
44161,
578,
18,
198,
11748,
20218,
7753,
198,
11748,
555,
715,
395,
198,
198,
1174... | 2.269841 | 315 |
"""
dialog module
"""
from time import sleep
from typing import Tuple
import matplotlib.pyplot as plt
# from matplotlib.image import AxesImage
from matplotlib.patches import Rectangle
import numpy as np
Point = Tuple[int, int]
i = 3 # type: int
i = '3'
class Ui(object):
"""
dialog
"""
def show_photo(
self,
file: str
) -> None:
""" show photo
"""
img = plt.imread(file)
self.im = plt.imshow(img)
def draw_rectangle(
self,
x: int,
y: int,
width: int,
height: int,
) -> None:
""" draw """
rgba = np.random.rand(4,)
# color = color.list()
rect = Rectangle(
[x, y],
width,
height,
linewidth=1,
color=rgba,
edgecolor='r',
facecolor='none'
)
self.ax.add_patch(rect)
def draw(self) -> None:
""" refresh plt show """
# self.ax.draw()
plt.draw()
def show(self):
""" show """
self.ax.axis('off') # clear x- and y-axes
plt.show()
def pause(self, seconds: int) -> None:
""" pause """
plt.pause(seconds)
def sleep(self, seconds: int) -> None:
""" sleep """
sleep(seconds)
def interactive_mode(self, switch: bool = True) -> None:
""" interactive """
if switch:
plt.ion()
else:
plt.ioff()
def wait_key(self) -> None:
""" wait press """
# plt.pause(1) # <-------
# raw_input("<Hit Enter To Close>")
plt.waitforbuttonpress()
def huan(self, n: int) -> None:
""" test
"""
print('huan%d' % (n))
| [
37811,
198,
38969,
519,
8265,
198,
37811,
198,
6738,
640,
1330,
3993,
198,
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
2,
422,
2603,
29487,
8019,
13,
9060,
1330,
12176,
274,
5... | 1.921336 | 928 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Distributed under the terms of the MIT License.
"""
Module for plotting reaction-based functions.
Author: Andrew Tarzia
Date Created: 15 Sep 2018
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.ticker import MaxNLocator
import plotting_fn as pfn
def no_rxns_vs_size(data, params, plot_suffix):
"""
Plot number of possible reactions as a function of size threshold.
"""
fig, ax = plt.subplots(figsize=(8, 5))
# bin each of the sets of data based on X value
width = 0.5
X_bins = np.arange(0, 20.5, width)
hist, bin_edges = np.histogram(a=data['max_mid_diam'], bins=X_bins)
ax2 = ax.twinx()
ax2.bar(
bin_edges[:-1],
hist,
align='edge',
alpha=0.9, width=width,
color='#2C3E50',
edgecolor='k'
)
# cumulative plot
cumul = np.cumsum(hist)
ax.plot(
bin_edges[:-1],
cumul,
alpha=1.0,
label='max component < threshold',
color='r',
marker='o'
)
# ax.axvspan(xmin=4.0, xmax=6.6, facecolor='k', alpha=0.2,
# hatch="/")
ax.axvspan(xmin=4.0, xmax=6.6, facecolor='k', alpha=0.2)
# ax.axvspan(xmin=5.4, xmax=6.6, facecolor='k', alpha=0.2)
# plot possible region of ZIF pore limiting diameters from
# Banerjee 2008 - 10.1126/science.1152516
# ax.axvspan(0.0, 13, facecolor='#2ca02c', alpha=0.2)
# ax.axvline(x=13.1, c='k', lw=2, linestyle='--')
pfn.define_standard_plot(
ax,
xtitle=r'$d$ of largest component [$\mathrm{\AA}$]',
ytitle='cumulative # reactions',
xlim=(0, 17),
ylim=(0, int(max(cumul)+max(cumul)*0.1))
)
ax2.set_ylim(0, int(max(hist)+max(hist)*0.2))
ax2.set_ylabel('# reactions', fontsize=16)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax2.yaxis.set_major_locator(MaxNLocator(integer=True))
# Change left y axis colours.
ax.spines['left'].set_color('red')
ax2.spines['left'].set_color('red')
ax2.tick_params(axis='both', which='major', labelsize=16)
fig.tight_layout()
fig.savefig(
f"{plot_suffix}/size_threshold_{plot_suffix}.pdf",
dpi=720,
bbox_inches='tight'
)
def rxn_space(data, filename):
"""
Plot number of possible reactions as a function of size threshold.
"""
plot_prop = {
1: {
'c': '#FA7268',
'e': 'none',
'a': 0.5,
'm': 'o',
's': 50,
'label': 'class I'
},
2: {
'c': '#DAF7A6',
'e': 'none',
'a': 0.5,
'm': 'x',
's': 50,
'label': 'class II'
},
3: {
'c': '#900C3F',
'e': 'none',
'a': 1.0,
'm': 'x',
's': 50,
'label': 'class III'
},
4: {
'c': '#F6D973',
'e': 'none',
'a': 0.5,
'm': 'x',
's': 50,
'label': 'class IV'
}
}
# bin each of the sets of data based on X value
width = 0.5
X_bins = np.arange(0, 20.5, width)
fig, ax = plt.subplots(figsize=(8, 5))
# bin each of the sets of data based on X value
for p in plot_prop:
if p != 3:
continue
pp = plot_prop[p]
sub_data = data[data['PC_class'] == p]
hist, bin_edges = np.histogram(
a=sub_data['max_mid_diam'],
bins=X_bins
)
ax.bar(
bin_edges[:-1],
hist,
align='edge',
alpha=pp['a'],
width=width,
color=pp['c'],
edgecolor='k',
label=pp['label']
)
ax.legend(fontsize=16)
ax.axvspan(xmin=4.0, xmax=6.6, facecolor='k', alpha=0.2, hatch="/")
# ax.axvspan(xmin=5.4, xmax=6.6, facecolor='k', alpha=0.2)
# plot possible region of ZIF pore limiting diameters from
# Banerjee 2008 - 10.1126/science.1152516
# ax.axvspan(0.0, 13, facecolor='#2ca02c', alpha=0.2)
# HOF.
ax.axvline(x=13.1, c='k', lw=2, linestyle='--')
pfn.define_standard_plot(
ax,
xtitle=r'$d$ of largest component [$\mathrm{\AA}$]',
ytitle='# reactions',
xlim=(0, 17),
ylim=None
)
fig.tight_layout()
fig.savefig(
filename,
dpi=720,
bbox_inches='tight'
)
def rxn_value(data, filename):
"""
Plot the value of all reactions as violin plot.
"""
plot_prop = {
1: {
'c': '#900C3F',
'e': 'none',
'a': 0.5,
'm': 'o',
's': 50,
'label': 'class I'
},
2: {
'c': '#FA7268',
'e': 'none',
'a': 0.5,
'm': 'x',
's': 50,
'label': 'class II'
},
3: {
'c': '#F6D973',
'e': 'none',
'a': 1.0,
'm': 'x',
's': 50,
'label': 'class III'
},
4: {
'c': '#DAF7A6',
'e': 'none',
'a': 0.5,
'm': 'x',
's': 50,
'label': 'class IV'
}
}
fig, ax = plt.subplots(figsize=(8, 5))
# bin each of the sets of data based on X value
for p in plot_prop:
pp = plot_prop[p]
sub_data = data[data['PC_class'] == p]
values = sub_data['max_mid_diam']
number = int(p)
parts = ax.violinplot(
values,
[number],
showmeans=False,
showmedians=False,
showextrema=False
)
for pc in parts['bodies']:
pc.set_facecolor(pp['c'])
pc.set_edgecolor('black')
pc.set_alpha(1.0)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel('purchasability class', fontsize=16)
ax.set_ylabel(
r'$d$ of largest component [$\mathrm{\AA}$]',
fontsize=16
)
ax.set_xlim(0.5, 4.5)
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.axhspan(ymin=4.0, ymax=6.6, facecolor='k', alpha=0.2)
fig.tight_layout()
fig.savefig(
filename,
dpi=720,
bbox_inches='tight'
)
def rxn_complexity(data, filename):
"""
Plot the measures of complexity of each reaction.
"""
fig, ax = plt.subplots(figsize=(8, 5))
ylim = (-1000, 1000)
xlim = (-10, 10)
# CS = [(1.0, 1.0, 1.0), (44/255, 62/255, 80/255)]
# cm = colors.LinearSegmentedColormap.from_list('test', CS, N=10)
# fig, ax, hist = pfn.twoD_histogram(
# X_data=data['deltasa'],
# Y_data=data['deltabct'],
# xlim=xlim,
# ylim=ylim,
# cmap=cm,
# fig=fig,
# ax=ax
# )
# cbar = fig.colorbar(hist[3], ax=ax)
# cbar.ax.set_ylabel('count', fontsize=16)
# cbar.ax.tick_params(labelsize=16)
ax.scatter(
data['deltasa'],
data['deltabct'],
c='#CCD1D1',
edgecolors='none',
marker='o',
alpha=1.0,
s=40,
label='full dataset'
)
small_data = data[data['max_mid_diam'] < 6.6]
ax.scatter(
small_data['deltasa'],
small_data['deltabct'],
c='#2C3E50',
edgecolors='none',
marker='o',
alpha=1.0,
s=40,
label='viable reactions'
)
pfn.define_standard_plot(
ax,
# xtitle='number of heavy atoms',
ylim=ylim,
xlim=xlim,
ytitle=r'$\Delta$ BertzCT',
xtitle=r'$\Delta$ SAscore',
)
ax.legend(fontsize=16)
fig.tight_layout()
fig.savefig(
filename,
dpi=720,
bbox_inches='tight'
)
def save_candidates(data, params, filename):
"""
Save candidates to file.
"""
all_fit = data.sort_values(by='max_mid_diam')
all_fit.to_csv(filename, index=False)
print(f'There are {len(all_fit)} candidate reactions!')
print('---------------------------------------------------')
def stacked_dist(data, col, xtitle, xlim, width):
"""
Plot histograms of data stacked by top level EC no.
"""
delta_data = {'total': []}
for i, row in data.iterrows():
EC = row['ec']
top_EC = EC.split('.')[0]
if top_EC not in list(delta_data.keys()):
delta_data[top_EC] = []
delta_data[top_EC].append(row[col])
delta_data['total'].append(row[col])
fig, ax = plt.subplots(figsize=(8, 5))
if xlim is None:
xlim = (
min([min(delta_data[i]) for i in delta_data])-2*width,
max([max(delta_data[i]) for i in delta_data])+2*width
)
X_bins = np.arange(xlim[0], xlim[1], width)
for keys in delta_data:
values = delta_data[keys]
hist, bin_edges = np.histogram(
a=values,
bins=X_bins,
density=True
)
ax.plot(
X_bins[:-1]+width/2,
hist,
c=pfn.EC_descriptions()[keys][1],
lw='1.5',
marker='o',
alpha=1.0,
label=pfn.EC_descriptions()[keys][0]
)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel(xtitle, fontsize=16)
ax.set_ylabel('frequency', fontsize=16)
ax.set_xlim(xlim)
# legend
ax.legend(fontsize=16)
return fig, ax
def dist(X, xtitle, xlim, width):
"""
Plot histograms of data.
"""
fig, ax = plt.subplots(figsize=(8, 5))
if xlim is None:
xlim = (min(X)-2*width, max(X)+2*width)
X_bins = np.arange(xlim[0], xlim[1], width)
hist, bin_edges = np.histogram(a=X, bins=X_bins)
if xtitle == 'purchasability class':
align = 'center'
else:
align = 'edge'
ax.bar(
bin_edges[:-1],
hist,
align=align,
alpha=1.0,
width=width,
color='#2980B9',
edgecolor='k'
)
pfn.define_standard_plot(
ax,
xtitle=xtitle,
ytitle='count',
xlim=xlim,
ylim=None
)
return fig, ax
def pie(X, xtitle, xlim, width):
"""
Plot pie chart of categorical data.
"""
if xtitle == 'purchasability class':
labels = ['class I', 'class II', 'class III', 'class IV']
colours = ['#D2B1D1', '#3498DB', '#C0392B', '#CCD1D1']
sizes = [
len([i for i in X if i == 1]),
len([i for i in X if i == 2]),
len([i for i in X if i == 3]),
len([i for i in X if i == 4])
]
else:
raise ValueError('this type of plot is not defined.')
# explode = (0.0, 0.0)
fig, ax = plt.subplots(figsize=(5, 5))
wedges, _, _ = ax.pie(
sizes,
colors=colours,
# explode=explode,
labels=labels,
autopct='%1.1f%%',
# shadow=True,
startangle=90,
textprops={'fontsize': 16}
)
for w in wedges:
w.set_linewidth(1.5)
w.set_edgecolor('k')
# Equal aspect ratio ensures that pie is drawn as a circle.
ax.axis('equal')
return fig, ax
def violinplot(data, col, ytitle, ylim):
"""
Plot violin plots of data separated by top level EC no.
"""
delta_data = {'total': []}
for i, row in data.iterrows():
EC = row['ec']
top_EC = EC.split('.')[0]
if top_EC not in list(delta_data.keys()):
delta_data[top_EC] = []
delta_data[top_EC].append(row[col])
delta_data['total'].append(row[col])
fig, ax = plt.subplots(figsize=(8, 5))
for keys in delta_data:
values = delta_data[keys]
if keys == '-':
number = 0
elif keys == 'total':
number = -1
else:
number = int(keys)
parts = ax.violinplot(
values,
[number],
showmeans=False,
showmedians=False,
showextrema=False
)
for pc in parts['bodies']:
pc.set_facecolor(pfn.EC_descriptions()[keys][1])
pc.set_edgecolor('black')
pc.set_alpha(1.0)
ax.tick_params(axis='both', which='major', labelsize=16)
ax.set_xlabel('EC number', fontsize=16)
ax.set_ylabel(ytitle, fontsize=16)
ax.set_xlim(-2, 8)
ax.set_xticks([-1, 0, 1, 2, 3, 4, 5, 6, 7])
ax.set_xticklabels(
['all', 'unknown', '1', '2', '3', '4', '5', '6', '7']
)
if col == 'max_mid_diam':
ax.axhspan(ymin=4.0, ymax=6.6, facecolor='k', alpha=0.2)
ax.set_ylim(ylim)
return fig, ax
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
17168,
13789,
13,
198,
198,
37811,
198,
26796,
329,
29353,
6317,
12,
310... | 1.848088 | 6,879 |
#!/usr/local/Caskroom/miniconda/base/envs/music_venv/bin/pythonw
from pyo import *
s = Server().boot()
path = SNDS_PATH + "/transparent.aif"
# stereo playback with a slight shift between the two channels.
sf = SfPlayer(path, speed = [1, 0.995], loop = True, mul = 0.4).out()
s.gui(locals())
| [
2,
48443,
14629,
14,
12001,
14,
34,
2093,
3823,
14,
1084,
291,
13533,
14,
8692,
14,
268,
14259,
14,
28965,
62,
574,
85,
14,
8800,
14,
29412,
86,
198,
198,
6738,
279,
8226,
1330,
1635,
198,
198,
82,
796,
9652,
22446,
18769,
3419,
1... | 2.619469 | 113 |
import re
from termcolor import colored
# c = 'Objective: We investigated whether implantation of polylactic acid and epsilon-caprolactone copolymer (PLAC) cubes with or without basic fibroblast growth factor (b-FGF) released slowly from gelatin microspheres was able to induce fibrous tissue in the dead space remaining after pneumonectomy in the thoracic cavity.'
# s = 'Objective: We investigated whether implantation of polylactic acid and epsilon-caprolactone copolymer (PLAC) cubes with or without basic fibroblast growth factor (b-FGF) released slowly from gelatin microspheres was able to induce fibrous tissue in the dead space remaining after pneumonectomy in the thoracic cavity.'
# print(c.replace(s, "\33[33m" +s+ "\33[0m"))
| [
198,
11748,
302,
198,
6738,
3381,
8043,
1330,
16396,
628,
198,
2,
269,
796,
705,
10267,
425,
25,
775,
12565,
1771,
29016,
341,
286,
7514,
75,
12009,
7408,
290,
304,
862,
33576,
12,
11128,
3225,
529,
505,
2243,
3366,
647,
357,
6489,
... | 3.771574 | 197 |
# GENERATED BY setup.py
commit = u"e508b6e4d"
| [
2,
24700,
1137,
11617,
11050,
9058,
13,
9078,
198,
41509,
796,
334,
1,
68,
33042,
65,
21,
68,
19,
67,
1,
198
] | 2.090909 | 22 |
# coding:utf-8
"""
_AddressParser is an implementation of a recursive descent parser for email
addresses and urls. While _AddressParser can be used directly it is not
recommended, use the the parse() method which is provided in the address
module for convenience.
The grammar supported by the parser (as well as other limitations) are
outlined below. Plugins are also supported to allow for custom more
restrictive grammar that is typically seen at large Email Service Providers
(ESPs).
For email addresses, the grammar tries to stick to RFC 5322 as much as
possible, but includes relaxed (lax) grammar as well to support for common
realistic uses of email addresses on the Internet.
Grammar:
address-list -> address { delimiter address }
mailbox -> name-addr-rfc | name-addr-lax | addr-spec | url
name-addr-rfc -> [ display-name-rfc ] angle-addr-rfc
display-name-rfc -> [ whitespace ] word { whitespace word }
angle-addr-rfc -> [ whitespace ] < addr-spec > [ whitespace ]
name-addr-lax -> [ display-name-lax ] angle-addr-lax
display-name-lax -> [ whitespace ] word { whitespace word } whitespace
angle-addr-lax -> addr-spec [ whitespace ]
addr-spec -> [ whitespace ] local-part @ domain [ whitespace ]
local-part -> dot-atom | quoted-string
domain -> dot-atom
word -> word-ascii | word-unicode
word-ascii -> atom | quoted-string
word-unicode -> unicode-atom | unicode-qstring
whitespace -> whitespace-ascii | whitespace-unicode
Additional limitations on email addresses:
1. local-part:
* Must not be greater than 64 octets
2. domain:
* No more than 127 levels
* Each level no more than 63 octets
* Texual representation can not exceed 253 characters
* No level can begin or end with -
3. Maximum mailbox length is len(local-part) + len('@') + len(domain) which
is 64 + 1 + 253 = 318 characters. Allow 194 characters for a display
name and the (very generous) limit becomes 512 characters. Allow 1024
mailboxes and the total limit on a mailbox-list is 524288 characters.
"""
import re
import addresslib.address
from addresslib.tokenizer import TokenStream
from addresslib.tokenizer import LBRACKET
from addresslib.tokenizer import AT_SYMBOL
from addresslib.tokenizer import RBRACKET
from addresslib.tokenizer import DQUOTE
from addresslib.tokenizer import BAD_DOMAIN
from addresslib.tokenizer import DELIMITER
from addresslib.tokenizer import RELAX_ATOM
from addresslib.tokenizer import WHITESPACE
from addresslib.tokenizer import UNI_WHITE
from addresslib.tokenizer import ATOM
from addresslib.tokenizer import UNI_ATOM
from addresslib.tokenizer import UNI_QSTR
from addresslib.tokenizer import DOT_ATOM
from addresslib.tokenizer import QSTRING
from addresslib.tokenizer import URL
from .utils import is_pure_ascii
from .utils import contains_control_chars
from .utils import cleanup_display_name
from .utils import cleanup_email
class _AddressParser(object):
    """
    Do not use _AddressParser directly because it heavily relies on other
    private classes and methods and its interface is not guaranteed. It
    will change in the future and possibly break your application.
    Instead use the parse() function in the address.py module which will
    always return a scalar or iterable respectively.
    """
    # Recursive-descent parser over a TokenStream. Each parsing method
    # either consumes tokens and returns a value, or restores the saved
    # stream position and returns None so the caller can try alternatives.
    # NOTE(review): methods read ``self.strict``; it is expected to be set
    # by a constructor that is not visible in this chunk — confirm.
    def address_list(self, stream):
        """
        Extract a mailbox and/or url list from a stream of input, operates in
        strict and relaxed modes.
        Returns either an AddressList (strict) or a tuple of
        (AddressList, unparsable chunks) (relaxed).
        """
        # sanity check
        if not stream:
            raise ParserException('No input provided to parser.')
        if isinstance(stream, str) and not is_pure_ascii(stream):
            raise ParserException('ASCII string contains non-ASCII chars.')
        # to avoid spinning here forever, limit address list length
        if len(stream) > MAX_ADDRESS_LIST_LENGTH:
            raise ParserException('Stream length exceeds maximum allowable ' + \
                'address list length of ' + str(MAX_ADDRESS_LIST_LENGTH) + '.')
        # set stream
        self.stream = TokenStream(stream)
        if self.strict is True:
            return self._address_list_strict()
        return self._address_list_relaxed()
    def address(self, stream):
        """
        Extract a single address or url from a stream of input, always
        operates in strict mode. Returns None unless the whole stream is
        consumed by exactly one address (plus optional whitespace).
        """
        # sanity check
        if not stream:
            raise ParserException('No input provided to parser.')
        if isinstance(stream, str) and not is_pure_ascii(stream):
            raise ParserException('ASCII string contains non-ASCII chars.')
        # to avoid spinning here forever, limit mailbox length
        if len(stream) > MAX_ADDRESS_LENGTH:
            raise ParserException('Stream length exceeds maximum allowable ' + \
                'address length of ' + str(MAX_ADDRESS_LENGTH) + '.')
        self.stream = TokenStream(stream)
        addr = self._address()
        if addr:
            # optional whitespace
            self._whitespace()
            # if we hit the end of the stream, we have a valid inbox
            if self.stream.end_of_stream():
                return addr
        return None
    def address_spec(self, stream):
        """
        Extract a single address spec (local-part@domain, no display name)
        from a stream of input, always operates in strict mode.
        """
        # sanity check
        if stream is None:
            raise ParserException('No input provided to parser.')
        if isinstance(stream, str) and not is_pure_ascii(stream):
            raise ParserException('ASCII string contains non-ASCII chars.')
        # to avoid spinning here forever, limit mailbox length
        if len(stream) > MAX_ADDRESS_LENGTH:
            raise ParserException('Stream length exceeds maximum allowable ' + \
                'address length of ' + str(MAX_ADDRESS_LENGTH) + '.')
        self.stream = TokenStream(stream)
        addr = self._addr_spec()
        if addr:
            # optional whitespace
            self._whitespace()
            # if we hit the end of the stream, we have a valid inbox
            if self.stream.end_of_stream():
                return addr
        return None
    def _mailbox_post_processing_checks(self, address):
        """
        Additional post processing checks to ensure mailbox is valid,
        enforcing the RFC size limits listed in the module docstring.
        """
        parts = address.split('@')
        # check if local part is less than 1024 octets, the actual
        # limit is 64 octets but we allow 16x that size here because
        # unsubscribe links are frequently longer
        lpart = parts[0]
        if len(lpart) > 1024:
            return False
        # check if the domain is less than 255 octets
        domn = parts[1]
        if len(domn) > 253:
            return False
        # number of labels can not be over 127
        labels = domn.split('.')
        if len(labels) > 127:
            return False
        for label in labels:
            # check the domain doesn't start or end with - and
            # the length of each label is no more than 63 octets
            if BAD_DOMAIN.search(label) or len(label) > 63:
                return False
        return True
    def _address_list_relaxed(self):
        """
        Grammar: address-list-relaxed -> address { delimiter address }
        Returns (addresses, unparsable): any text that cannot be parsed is
        collected verbatim into the second list instead of failing.
        """
        #addrs = []
        addrs = addresslib.address.AddressList()
        unparsable = []
        # address
        addr = self._address()
        if addr is None:
            # synchronize to the next delimiter (or end of line)
            # append the skipped over text to the unparsable list
            skip = self.stream.synchronize()
            if skip:
                unparsable.append(skip)
            # if no mailbox and end of stream, we were unable
            # return the unparsable stream
            if self.stream.end_of_stream():
                return [], unparsable
        else:
            # if we found a delimiter or end of stream, we have a
            # valid mailbox, add it
            if self.stream.peek(DELIMITER) or self.stream.end_of_stream():
                addrs.append(addr)
            else:
                # otherwise synchronize and add it to the unparsable array
                skip = self.stream.synchronize()
                if skip:
                    pre = self.stream.stream[:self.stream.stream.index(skip)]
                    unparsable.append(pre + skip)
                # if we hit the end of the stream, return the results
                if self.stream.end_of_stream():
                    return [], [self.stream.stream]
        while True:
            # delimiter
            dlm = self.stream.get_token(DELIMITER)
            if dlm is None:
                skip = self.stream.synchronize()
                if skip:
                    unparsable.append(skip)
                if self.stream.end_of_stream():
                    break
            # address
            start_pos = self.stream.position
            addr = self._address()
            if addr is None:
                skip = self.stream.synchronize()
                if skip:
                    unparsable.append(skip)
                if self.stream.end_of_stream():
                    break
            else:
                # if we found a delimiter or end of stream, we have a
                # valid mailbox, add it
                if self.stream.peek(DELIMITER) or self.stream.end_of_stream():
                    addrs.append(addr)
                else:
                    # otherwise synchronize and add it to the unparsable array
                    skip = self.stream.synchronize()
                    if skip:
                        sskip = self.stream.stream[start_pos:self.stream.position]
                        unparsable.append(sskip)
                    # if we hit the end of the stream, return the results
                    if self.stream.end_of_stream():
                        return addrs, unparsable
        return addrs, unparsable
    def _address_list_strict(self):
        """
        Grammar: address-list-strict -> address { delimiter address }
        Stops at the first token that does not fit the grammar.
        """
        # NOTE(review): a single address with no trailing delimiter is never
        # appended (the first append is guarded by peek(DELIMITER) and the
        # loop breaks immediately) — so "a@b.com" yields an empty list in
        # strict mode. Looks suspicious; confirm this is intended.
        #addrs = []
        addrs = addresslib.address.AddressList()
        # address
        addr = self._address()
        if addr is None:
            return addrs
        if self.stream.peek(DELIMITER):
            addrs.append(addr)
        while True:
            # delimiter
            dlm = self.stream.get_token(DELIMITER)
            if dlm is None:
                break
            # address
            addr = self._address()
            if addr is None:
                break
            addrs.append(addr)
        return addrs
    def _address(self):
        """
        Grammar: address -> name-addr-rfc | name-addr-lax | addr-spec | url
        NOTE(review): only the addr-spec alternative is attempted in this
        chunk; the other grammar branches are not visible here — confirm.
        """
        start_pos = self.stream.position
        addr = self._addr_spec()
        # if email address, check that it passes post processing checks
        if addr and isinstance(addr, addresslib.address.EmailAddress):
            if self._mailbox_post_processing_checks(addr.address) is False:
                # roll back
                self.stream.position = start_pos
                return None
        return addr
    def _addr_spec(self, as_string=False):
        """
        Grammar: addr-spec -> [ whitespace ] local-part @ domain [ whitespace ]
        Returns an EmailAddress (or the cleaned string when as_string=True),
        or None after restoring the stream position.
        """
        start_pos = self.stream.position
        # optional whitespace
        self._whitespace()
        lpart = self._local_part()
        if lpart is None:
            # rollback
            self.stream.position = start_pos
            return None
        asym = self.stream.get_token(AT_SYMBOL)
        if asym is None:
            # rollback
            self.stream.position = start_pos
            return None
        domn = self._domain()
        if domn is None:
            # rollback
            self.stream.position = start_pos
            return None
        # optional whitespace
        self._whitespace()
        aspec = cleanup_email(''.join([lpart, asym, domn]))
        if as_string:
            return aspec
        return addresslib.address.EmailAddress(aspec)
    def _local_part(self):
        """
        Grammar: local-part -> dot-atom | quoted-string
        """
        return self.stream.get_token(DOT_ATOM) or \
            self.stream.get_token(QSTRING)
    def _domain(self):
        """
        Grammar: domain -> dot-atom
        """
        return self.stream.get_token(DOT_ATOM)
    def _word(self):
        """
        Grammar: word -> word-ascii | word-unicode
        """
        start_pos = self.stream.position
        # ascii word
        ascii_wrd = self._word_ascii()
        if ascii_wrd and not self.stream.peek(UNI_ATOM):
            return ascii_wrd
        # didn't get an ascii word, rollback to try again
        self.stream.position = start_pos
        # unicode word
        return self._word_unicode()
    def _word_ascii(self):
        """
        Grammar: word-ascii -> atom | qstring
        """
        wrd = self.stream.get_token(RELAX_ATOM) or self.stream.get_token(QSTRING)
        if wrd and not contains_control_chars(wrd):
            return wrd
        return None
    def _word_unicode(self):
        """
        Grammar: word-unicode -> unicode-atom | unicode-qstring
        """
        start_pos = self.stream.position
        # unicode atom
        uwrd = self.stream.get_token(UNI_ATOM)
        if uwrd and isinstance(uwrd, str) and not contains_control_chars(uwrd):
            return uwrd
        # unicode qstr
        uwrd = self.stream.get_token(UNI_QSTR, 'qstr')
        if uwrd and isinstance(uwrd, str) and not contains_control_chars(uwrd):
            # NOTE(review): encode_string is not imported in this chunk —
            # confirm where it is defined before relying on this path.
            return '"{0}"'.format(encode_string(None, uwrd))
        # rollback
        self.stream.position = start_pos
        return None
    def _whitespace(self):
        """
        Grammar: whitespace -> whitespace-ascii | whitespace-unicode
        """
        return self._whitespace_ascii() or self._whitespace_unicode()
    def _whitespace_ascii(self):
        """
        Grammar: whitespace-ascii -> whitespace-ascii
        """
        return self.stream.get_token(WHITESPACE)
    def _whitespace_unicode(self):
        """
        Grammar: whitespace-unicode -> whitespace-unicode
        """
        uwhite = self.stream.get_token(UNI_WHITE)
        if uwhite and not is_pure_ascii(uwhite):
            return uwhite
        return None
class ParserException(Exception):
    """Signals that the address parser could not process its input."""
# Hard limits guarding the parser against pathological input sizes.
MAX_ADDRESS_LENGTH = 1280
MAX_ADDRESS_NUMBER = 1024
# Upper bound on the total length of a mailbox list fed to address_list().
MAX_ADDRESS_LIST_LENGTH = MAX_ADDRESS_LENGTH * MAX_ADDRESS_NUMBER
| [
2,
19617,
25,
40477,
12,
23,
198,
198,
37811,
198,
62,
20231,
46677,
318,
281,
7822,
286,
257,
45115,
18598,
30751,
329,
3053,
198,
2860,
16746,
290,
2956,
7278,
13,
2893,
4808,
20231,
46677,
460,
307,
973,
3264,
340,
318,
407,
198,
... | 2.265604 | 6,585 |
"""
Path parameters
---------------
Parameters to set the precomputations in order to facilitate the computation
model tasks
"""
pathdata_in = '/home/antonio/Desktop/MSc Thesis/code/Data/raw_data/Testing'
pathdata = '/home/antonio/Desktop/MSc Thesis/code/Data/Data/Cleaned/FirmsData'
pathfolder = '/home/antonio/Desktop/MSc Thesis/code/Data/Data/'
logfile = 'logfile.log'
| [
198,
37811,
198,
15235,
10007,
198,
24305,
198,
48944,
284,
900,
262,
662,
785,
1996,
602,
287,
1502,
284,
15570,
262,
29964,
198,
19849,
8861,
198,
198,
37811,
198,
198,
6978,
7890,
62,
259,
796,
31051,
11195,
14,
23026,
952,
14,
368... | 3.04878 | 123 |
# Fill in your own credentials before running; empty placeholders keep the
# module importable (the original bare assignments were a SyntaxError).
telegram_token = ""  # your telegram token
yandex_token = ""  # your yandex-translator token
| [
660,
30536,
62,
30001,
796,
1303,
534,
573,
30536,
11241,
198,
88,
392,
1069,
62,
30001,
796,
1303,
534,
331,
392,
1069,
12,
7645,
41880,
11241,
198
] | 3.148148 | 27 |
import torch
import time
import numpy as np
import hess
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
from gpytorch.utils.lanczos import lanczos_tridiag, lanczos_tridiag_to_diag
################################################################################
# Supporting Functions
################################################################################
def gradtensor_to_tensor(net, include_bn=False):
    """
    Concatenate the gradients of a model's parameters into one flat tensor.

    Args:
        net: model whose ``p.grad.data`` tensors are collected.
        include_bn: if False, skip 1-D parameters (biases and BatchNorm
            weights/biases); if True, include every parameter.

    Returns:
        The result of ``flatten`` over the selected gradient tensors.
    """
    # Renamed from ``filter`` to avoid shadowing the builtin of that name.
    keep = lambda p: include_bn or len(p.data.size()) > 1
    return flatten([p.grad.data for p in net.parameters() if keep(p)])
################################################################################
# For computing Hessian-vector products
################################################################################
def eval_hess_vec_prod(vec, params, net, criterion, inputs=None, targets=None,
                       dataloader=None,
                       use_cuda=False):
    """
    Evaluate product of the Hessian of the loss function with a direction
    vector "vec". The product result is accumulated into the ``.grad``
    attributes of ``params``.

    Args:
        vec: a list of tensors with the same dimensions as "params".
        params: the parameter list of the net (ignoring biases and BN
            parameters).
        net: model with trained parameters.
        criterion: loss function.
        inputs, targets: a single batch, used when ``dataloader`` is None.
        dataloader: dataloader for the dataset; when given, the product is
            accumulated over every batch.
        use_cuda: move the model, vector and data to the GPU.
    """
    if use_cuda:
        net.cuda()
        vec = [v.cuda() for v in vec]

    net.zero_grad()  # clears grad for every parameter in the net

    def _accumulate_hvp(batch_inputs, batch_targets):
        # H*v via double backward: d(grad(loss) . vec)/dparams == H @ vec.
        if use_cuda:
            batch_inputs, batch_targets = batch_inputs.cuda(), batch_targets.cuda()
        loss = criterion(net(batch_inputs), batch_targets)
        grad_f = torch.autograd.grad(loss, inputs=params, create_graph=True)
        # Inner product of the gradient with the direction vector.
        prod = sum((g * v).sum() for g, v in zip(grad_f, vec))
        # backward() accumulates d(prod)/d(params) into params[i].grad.
        prod.backward()

    if dataloader is None:
        _accumulate_hvp(inputs, targets)
    else:
        for batch_inputs, batch_targets in dataloader:
            _accumulate_hvp(batch_inputs, batch_targets)
#############################
# Return Hessian of a model #
#############################
| [
11748,
28034,
198,
11748,
640,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
339,
824,
198,
6738,
28034,
1330,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
2306,
519,
6335,
1330,
35748,
198,
198,
... | 2.59156 | 1,327 |
# Description: Switch Case (SIMULATED in Python)
# Note
# 1. There is NO switch statement in python.
# 2. A dictionary can be used to simulate switch case in python.
# 3. Dispatching through a dict of callables replaces a long if/elif chain.
# NOTE(review): switch() is not defined in this chunk — presumably a
# dict-based dispatcher defined elsewhere in the file; confirm before use.
print(switch('b'))
| [
2,
12489,
25,
14645,
8913,
357,
48913,
6239,
11617,
287,
11361,
8,
198,
198,
2,
5740,
198,
2,
352,
13,
1318,
318,
8005,
5078,
2643,
287,
21015,
13,
198,
2,
362,
13,
317,
22155,
460,
307,
973,
284,
29308,
5078,
1339,
287,
21015,
13... | 3.309859 | 71 |
import csv
import json
import operator
import os
import sqlite3
import urllib.request
from raven import Client
from flask import Flask, request, session, g, redirect, make_response, url_for, abort, escape, render_template, flash, send_from_directory
from passwords import SENTRY_DSN
# Sentry error-reporting client; SENTRY_DSN comes from the passwords module.
client = Client(SENTRY_DSN) #add debugging
app = Flask(__name__, ) # create application instance
app.config.from_object(__name__) # load config from this file, app.py
#default config
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'mypower.db'),
    # NOTE(review): SECRET_KEY is regenerated on every process start, which
    # invalidates all existing sessions after a restart — confirm intended.
    SECRET_KEY= os.urandom(24)
))
app.config.from_envvar('MYPOWER_SETTINGS', silent=True) ##loads settings if exist, doesn't complain if they don't
def connect_db():
    """Open a new connection to the application's configured SQLite file."""
    conn = sqlite3.connect(app.config['DATABASE'])
    # Return rows that support name-based access, e.g. row['ColumnName'].
    conn.row_factory = sqlite3.Row
    return conn
def get_db():
    """Return the per-request database handle, creating it on first use.

    The connection is cached on the Flask application context ``g``.
    """
    db = getattr(g, 'sqlite_db', None)
    if db is None:
        db = g.sqlite_db = connect_db()
    return db
def compare_renewable(arg):
    """Compares a non-renewable offer to paying for a 100% renewable plan.

    Parameters
    ----------
    arg : sequence
        An offer row; index 12 holds the offer's renewable percentage.

    Returns
    -------
    dict
        ``{offer_id: estimated_monthly_price}`` for the single cheapest
        fully-renewable alternative, or ``{}`` when the given offer is
        already 100% renewable.
    """
    percent_renewable = int(arg[12])
    # A fully renewable offer needs no alternative.
    if percent_renewable == 100:
        return {}
    # NOTE(review): dropped the original's unused ``arg[0]`` and
    # ``get_saved_data('offers')`` reads — assumes get_saved_data has no
    # side effects; confirm.
    user_preferences = get_saved_data('user')
    usage = int(user_preferences["usage"])

    def _monthly_price(row):
        # row[4]/row[5]/row[6] hold the per-kWh rates for the 500/1000/2000
        # kWh pricing tiers; pick the tier matching the user's usage.
        if usage > 1000:
            rate = row[6]
        elif usage > 500:
            rate = row[5]
        else:
            rate = row[4]
        return round(usage * rate, 0)

    db = get_db()
    t = (user_preferences["tdu"], user_preferences["contract"], 100, 'FALSE')
    cur = db.execute('SELECT * FROM offers WHERE TduCompanyName=? AND TermValue >=? AND Renewable >=? AND MinUsageFeesCredits = ? AND kwh500 IS NOT NULL', t)
    # Offer id (row[0]) -> estimated monthly bill.
    user_offers = {row[0]: _monthly_price(row) for row in cur.fetchall()}
    # Keep only the single cheapest offer, preserving the {id: price} shape.
    cheapest = sorted(user_offers.items(), key=operator.itemgetter(1))[:1]
    return dict(cheapest)
@app.teardown_appcontext
def close_db(error):
    """Closes the request-scoped database connection, if one was opened."""
    db = getattr(g, 'sqlite_db', None)
    if db is not None:
        db.close()
def avg_price(user_preferences):
    """Estimates the user's monthly electric bill across matching offers.

    Filters offers by TDU, contract length, renewable percentage and the
    absence of minimum-usage fees, prices each offer at the user's usage,
    drops offers whose bill spikes under modest extra usage, and returns
    the ten cheapest.

    Parameters
    ----------
    user_preferences : mapping
        Expects keys "tdu", "contract", "renewable" and "usage".

    Returns
    -------
    dict
        ``{offer_id: estimated_monthly_price}`` for up to ten offers,
        cheapest first (insertion order).
    """
    usage = int(user_preferences["usage"])
    # Also price each offer at 25% higher usage to expose usage traps.
    usage_upper = usage * 1.25

    def _monthly_price(row, use):
        # row[4]/row[5]/row[6] are per-kWh rates for the 500/1000/2000 kWh
        # pricing tiers; pick the tier matching the assumed usage.
        if use > 1000:
            rate = row[6]
        elif use > 500:
            rate = row[5]
        else:
            rate = row[4]
        return round(use * rate, 0)

    db = get_db()
    t = (user_preferences["tdu"], user_preferences["contract"],
         user_preferences["renewable"], 'FALSE')
    cur = db.execute('SELECT * FROM offers WHERE TduCompanyName=? AND TermValue >=? AND Renewable >=? AND MinUsageFeesCredits = ? AND kwh500 IS NOT NULL', t)
    user_offers = {}
    for row in cur.fetchall():
        price = _monthly_price(row, usage)
        price_upper = _monthly_price(row, usage_upper)
        # Compare to an upper price to help filter out bad offers: if the
        # bill jumps by 50% when usage rises only 25%, skip the offer.
        price_ratio = (price_upper - price) / price_upper
        if price_ratio < 0.50:
            user_offers[row[0]] = price
    cheapest = sorted(user_offers.items(), key=operator.itemgetter(1))[:10]
    return dict(cheapest)
@app.route('/offers/')
@app.route('/offers/<int:idKey>/')
@app.route('/save', methods=['GET', 'POST']) ##method only accesible if your post to it
@app.route('/about/')
@app.route('/')
@app.route('/sitemap/')
@app.errorhandler(404)
@app.errorhandler(500)
@app.route('/robots.txt')
@app.route('/sitemap.xml')
if __name__ == '__main__':
    # Run the Flask development server (debug mode: reloader + tracebacks).
    app.run(debug=True)
| [
11748,
269,
21370,
198,
11748,
33918,
198,
11748,
10088,
198,
11748,
28686,
198,
11748,
44161,
578,
18,
198,
11748,
2956,
297,
571,
13,
25927,
198,
6738,
37735,
1330,
20985,
198,
198,
6738,
42903,
1330,
46947,
11,
2581,
11,
6246,
11,
30... | 2.337185 | 1,904 |
import os
import json
import pytest
import pipeline_tools.shared.submission.create_analysis_file as caf
from pathlib import Path
@pytest.fixture(scope='module')
@pytest.fixture
| [
11748,
28686,
198,
11748,
33918,
198,
11748,
12972,
9288,
198,
198,
11748,
11523,
62,
31391,
13,
28710,
13,
7266,
3411,
13,
17953,
62,
20930,
62,
7753,
355,
19945,
198,
6738,
3108,
8019,
1330,
10644,
628,
198,
31,
9078,
9288,
13,
69,
... | 3.22807 | 57 |
# The following comment should be removed at some point in the future.
# mypy: strict-optional=False
import os
import re
import sys
from typing import Optional
from pipenv.patched.notpip._internal.locations import site_packages, user_site
from pipenv.patched.notpip._internal.utils.virtualenv import (
running_under_virtualenv,
virtualenv_no_global,
)
__all__ = [
"egg_link_path_from_sys_path",
"egg_link_path_from_location",
]
def _egg_link_name(raw_name: str) -> str:
"""
Convert a Name metadata value to a .egg-link name, by applying
the same substitution as pkg_resources's safe_name function.
Note: we cannot use canonicalize_name because it has a different logic.
"""
return re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link"
def egg_link_path_from_sys_path(raw_name: str) -> Optional[str]:
    """
    Look for a .egg-link file for project name, by walking sys.path.
    """
    link_name = _egg_link_name(raw_name)
    for entry in sys.path:
        candidate = os.path.join(entry, link_name)
        if os.path.isfile(candidate):
            return candidate
    return None
def egg_link_path_from_location(raw_name: str) -> Optional[str]:
    """
    Return the path for the .egg-link file if it exists, otherwise, None.

    There's 3 scenarios:
    1) not in a virtualenv
       try to find in site.USER_SITE, then site_packages
    2) in a no-global virtualenv
       try to find in site_packages
    3) in a yes-global virtualenv
       try to find in site_packages, then site.USER_SITE
       (don't look in global location)

    For #1 and #3, there could be odd cases, where there's an egg-link in 2
    locations. This method will just return the first one found.
    """
    if running_under_virtualenv():
        # Virtualenv: site-packages first; user site only with global access.
        sites = [site_packages]
        if user_site and not virtualenv_no_global():
            sites.append(user_site)
    else:
        # System python: prefer the user site, then site-packages.
        sites = [user_site] if user_site else []
        sites.append(site_packages)

    link_name = _egg_link_name(raw_name)
    for site in sites:
        candidate = os.path.join(site, link_name)
        if os.path.isfile(candidate):
            return candidate
    return None
| [
2,
383,
1708,
2912,
815,
307,
4615,
379,
617,
966,
287,
262,
2003,
13,
198,
2,
616,
9078,
25,
7646,
12,
25968,
28,
25101,
198,
198,
11748,
28686,
198,
11748,
302,
198,
11748,
25064,
198,
6738,
19720,
1330,
32233,
198,
198,
6738,
734... | 2.54143 | 881 |
from django.contrib.auth import login, logout
from django.contrib.auth.decorators import login_required
from django.core.exceptions import NON_FIELD_ERRORS
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render, redirect
from users.forms import RegistrationForm, LoginForm, SettingsForm
from users.models import User
@login_required
@login_required
@login_required
| [
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
1330,
17594,
11,
2604,
448,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
12501,
273,
2024,
1330,
17594,
62,
35827,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
44521,
... | 3.534483 | 116 |
from django.apps import AppConfig
| [
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
628
] | 3.888889 | 9 |
####################
# Import libraries #
####################
# standard numerical libraries
import numpy as np
import pandas as pd
# Plotting libraries
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# Sklearn model evaluation
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, roc_curve, confusion_matrix
# For generating multi-colour confusion matrices
from numpy.ma import masked_array
import itertools
########################
# Individual functions #
########################
def evaluate_classifiers(trained_classifiers, X, y_true):
    '''
    Calculates predictions and classifier statistics from a dictionary of
    trained sklearn classifiers.

    Parameters
    ----------
    trained_classifiers : dict of sklearn classifiers
    X : pandas.DataFrame
        Feature set
    y_true : pandas.Series
        Corresponding label set

    Returns
    -------
    y_scores : pandas.DataFrame
        True labels ('LABELS') vs probability scores for all classifiers
    y_preds : pandas.DataFrame
        True labels ('LABELS') vs predicted classes for all classifiers
    classifier_metrics : pandas.DataFrame
        Summary statistics for classifier performance
    roc_curves : dict
        Dictionary of arrays for plotting ROC curves
    confusion_matrices : dict
        Dictionary of arrays for plotting confusion matrices
    '''
    y_scores = {'LABELS': y_true.values}
    y_preds = {'LABELS': y_true.values}
    accuracies = {}
    f1_scores = {}
    roc_auc_scores = {}
    gini_scores = {}
    roc_curves = {}
    confusion_matrices = {}

    for name, clf in trained_classifiers.items():
        predicted = clf.predict(X)
        # Probability of the positive class only.
        scored = clf.predict_proba(X)[:, 1]
        y_preds[name] = predicted
        y_scores[name] = scored

        accuracies[name] = clf.score(X, y_true)
        f1_scores[name] = f1_score(y_true, predicted)
        roc_curves[name] = roc_curve(y_true, scored)
        auc = roc_auc_score(y_true, scored)
        roc_auc_scores[name] = auc
        gini_scores[name] = 2 * auc - 1
        confusion_matrices[name] = confusion_matrix(y_true, predicted)

    classifier_metrics = pd.DataFrame({
        'accuracy': accuracies,
        'f1_score': f1_scores,
        'roc-auc': roc_auc_scores,
        'gini': gini_scores,
    })
    return (pd.DataFrame(y_scores), pd.DataFrame(y_preds),
            classifier_metrics, roc_curves, confusion_matrices)
def plot_roc(roc_curves, baseline=True, perfect_clf_line=True, color_palette='standard'):
    '''
    Plots ROC curves

    Parameters
    ----------
    roc_curves : dict
        Dict of {'model_name': sklearn ROC parameters}
    baseline : bool
        If True, plots diagonal random-guesser line
    perfect_clf_line : bool
        If True, plots perfect classifier line
    color_palette : str
        One of {'standard', 'comparison'}:
        - If 'standard', uses standard seaborn categorical color_palette
        - If 'comparison', plots one line blue, and one line red - useful for
          comparing new and old models back to back (has to be 2 models in
          `roc_curves` only)
    '''
    plt.figure(figsize=[20, 10])

    # Reference lines.
    if baseline:
        plt.plot((0, 1), (0, 1), 'k--', label='baseline')
    if perfect_clf_line:
        plt.plot((0, 0, 1), (0, 1, 1), '--', color='#FF33F0',
                 label='perfect_classifier')

    # Choose one line colour per model.
    if color_palette == 'standard':
        colours = sns.color_palette(n_colors=len(roc_curves))
    elif color_palette == 'comparison':
        if len(roc_curves) != 2:
            raise ValueError('Input only 2 roc curves for "comparison" color_palette')
        colours = ['b', 'r']

    for colour, (model_name, (fpr, tpr, thresholds)) in zip(colours, roc_curves.items()):
        plt.plot(fpr, tpr, '-', color=colour, label=model_name)

    plt.title('ROC Curve', fontsize='xx-large')
    plt.xlabel('False Positive Rate \n (False Positives / All Negatives)', fontsize='x-large')
    plt.ylabel('True Positive Rate \n (True Positives / All Positives)', fontsize='x-large')
    plt.legend(fontsize='x-large')
def generate_label_palettes(categorical_palette, n_labels=2, plotter='mpl'):
    '''
    Given a parent seaborn categorical palette, generates single-colour
    sequential palettes for each colour in the parent palette, up to
    n_labels colours.

    Parameters
    ----------
    categorical_palette : seaborn.palettes._ColorPalette
        Parent palette of various colours
    n_labels : int
        Number of labels (dependent variables) in the classification task
    plotter : str
        One of {'sns', 'mpl'}:
        - 'sns' dictates the output palettes will be in `seaborn`
          color_palette format
        - 'mpl' dictates the output palettes will be in `matplotlib`
          colormap format

    Returns
    -------
    label_palettes : dict
        Dictionary of format {'label_name': single-colour map/palette}
    '''
    label_palettes = {}
    for i in range(n_labels):
        key = f'label_{i}'
        if plotter == 'sns':
            label_palettes[key] = sns.light_palette(categorical_palette[i], n_colors=50)
        elif plotter == 'mpl':
            label_palettes[key] = ListedColormap(
                sns.light_palette(categorical_palette[i], n_colors=50).as_hex())
        else:
            raise ValueError(f'plotter type {plotter} not recognised')
    return label_palettes
def plot_confusion_matrix(cm, classes, title='Confusion Matrix', fig=None, index=111, categorical_palette=sns.color_palette()):
    '''
    Plots confusion matrix, with class colours consistent with other plots

    Parameters
    ----------
    cm : np.array
        Confusion matrix array (absolute counts)
    classes : array-like
        Class labels used for the axis tick labels
    title : str
        Title of plot
    fig : matplotlib.figure.Figure or None
        Figure to draw into; a new 5x5 figure is created when None
    index : int
        Three-digit matplotlib subplot index passed to fig.add_subplot
    categorical_palette : seaborn palette
        Parent palette; one shade family is derived per class
    '''
    # fig, ax = plt.subplots(figsize=[5, 5])
    if fig is None:
        fig = plt.figure(figsize=[5, 5])
    ax = fig.add_subplot(index)
    # Normalise confusion matrix, for color scale and, optionally, for text display
    cm_norm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    # Generate varying palettes for each class from parent categorical palette
    label_palettes = generate_label_palettes(categorical_palette, n_labels=cm.shape[0], plotter='mpl')
    for i, label in enumerate(cm_norm):
        # Mask confusion matrix for each label, in order to apply a separate label colormap for each
        mask = np.zeros_like(cm_norm)
        # Imshow builds from bottom to top; row index for confusion matrix array is the opposite
        inverted_index = mask.shape[0] - 1 - i
        mask[inverted_index, :] = 1
        # NOTE(review): masked_array hides cells where mask == 1, so each
        # imshow layer paints everything EXCEPT the inverted row with label
        # i's colormap; the final image comes from the layer stacking —
        # confirm the orientation matches the intended per-class colouring.
        cm_masked = masked_array(cm_norm, mask)
        # Get label color palette
        cmap = label_palettes[f'label_{i}']
        # Plot label color intensities, based on normalised values
        cm_label = ax.imshow(cm_masked, interpolation='nearest', cmap=cmap, vmin=0, vmax=1)
    # Plot confusion matrix values (absolute count + row-normalised percent)
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        text = str(round(cm[i, j], 2)) + '\n' + str(round(100*cm_norm[i, j], 1)) + '%'
        ax.text(j, i+0.07, text,
                horizontalalignment="center",
                color="white" if cm_norm[i, j] > 0.5 else "black",
                fontsize='x-large')
    # Formatting
    tick_marks = np.arange(len(classes))
    ax.set_xticks(tick_marks)
    ax.set_xticklabels(classes, fontsize='x-large')
    ax.set_yticks(tick_marks)
    ax.set_yticklabels(classes, fontsize='x-large')
    # ax.tight_layout()
    ax.set_title(title, fontsize='xx-large')
    ax.set_xlabel('Predicted Label', fontsize='x-large')
    ax.set_ylabel('True Label', fontsize='x-large')
    # plt.show()
    return ax
def get_subplot_dims(num_plots, max_cols=3):
    '''Return subplot layout (rows, cols) for a given number of plots'''
    if num_plots <= max_cols:
        return 1, num_plots
    # Ceiling division without importing math.
    num_rows = -(-num_plots // max_cols)
    return num_rows, max_cols
def plot_confusion_matrices(confusion_matrices, y_preds):
    '''
    Plots a series of confusion matrices from a dictionary of CM arrays,
    from a set of trained models.

    Parameters
    ----------
    confusion_matrices : dict
        Dict of confusion matrix arrays, keyed by classifier name
    y_preds : pandas.DataFrame
        Predicted classes per classifier (used to label the matrix axes)
    '''
    num_rows, num_cols = get_subplot_dims(len(confusion_matrices))
    fig, ax = plt.subplots(num_rows, num_cols, figsize=[20*num_rows, 5*num_cols])
    # plt.subplots returns a bare Axes (not an array) when there is a single
    # subplot; normalise to a flat array so indexing works for any count.
    axes = np.atleast_1d(ax).ravel()
    for ax_number, (clf_name, cm) in enumerate(confusion_matrices.items()):
        # Turn off the placeholder axis; plot_confusion_matrix adds its own.
        axes[ax_number].axis('off')
        title = f'Confusion Matrix \n {clf_name}'
        # NOTE(review): the concatenated 3-digit subplot index breaks once
        # the grid exceeds 9 cells — confirm the expected number of models.
        index = int(str(num_rows) + str(num_cols) + str(ax_number + 1))
        plot_confusion_matrix(cm, classes=y_preds[clf_name].unique(), fig=fig,
                              index=index, title=title)
def plot_binary_clf_histogram(y_test, y_pred, bins=50, normalize=True, fig=None, index=111, title='Model predictions (histograms of positive and negative classes)', categorical_palette=sns.color_palette()):
    '''
    Plot histograms of model-predicted probability counts for both classes

    Parameters
    ----------
    y_test : pandas.Series
        Test-set labels
    y_pred : pandas.Series
        Test-set model predictions (probability of the positive class)
    bins : int
        Number of bins in each histogram
    normalize : bool
        Whether to display absolute or relative counts (useful for
        visualising when big 0/1 class imbalance)
    '''
    if fig is None:
        fig = plt.figure(figsize=[20, 10])
    ax = fig.add_subplot(index)

    sns.set_palette(categorical_palette)
    # Split the predictions by true class and overlay the two histograms
    # (negatives first, then positives, matching the palette order).
    for class_mask in (y_test == False, y_test == True):
        sns.distplot(y_pred[class_mask], hist=True, kde=False,
                     norm_hist=normalize, bins=bins, ax=ax)

    ax.set_xlabel('Model predicted probability of positive class', fontsize='x-large')
    ax.set_ylabel('Counts (of binned probability)', fontsize='x-large')
    ax.set_title(title, fontsize='xx-large')
def plot_binary_clf_histograms(y_test, y_scores):
    '''
    Plots one prediction histogram per trained model.

    Parameters
    ----------
    y_test : pandas.Series
        Test-set labels
    y_scores : pandas.DataFrame
        True labels ('LABELS') plus predicted probabilities per classifier
    '''
    y_scores_models = y_scores.drop('LABELS', axis=1)
    num_rows, num_cols = get_subplot_dims(y_scores_models.shape[1], max_cols=1)
    fig, ax = plt.subplots(num_rows, num_cols, figsize=[20, 10*num_rows])
    # plt.subplots returns a bare Axes (not an array) when there is only one
    # model; normalise to a flat array so positional indexing always works.
    axes = np.atleast_1d(ax).ravel()
    # .items() replaces the removed-in-pandas-2.x .iteritems() alias.
    for ax_number, (clf_name, y_score) in enumerate(y_scores_models.items()):
        # Turn off the placeholder axis; the histogram adds its own subplot.
        axes[ax_number].axis('off')
        title = f'Model Predictions \n {clf_name}'
        index = int(str(num_rows) + str(num_cols) + str(ax_number + 1))
        plot_binary_clf_histogram(y_test, y_score, title=title, fig=fig, index=index)
14468,
4242,
198,
2,
17267,
12782,
1303,
198,
14468,
4242,
198,
198,
2,
3210,
29052,
12782,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
2,
28114,
889,
12782,
198,
11748,
384,
397,
1211,
355,
301... | 2.425046 | 4,863 |
#!/bin/env python
# used to update all local and remote references into this bin folder
import subprocess
# Base URLs for cloning GitHub repositories over SSH or HTTPS.
GITHUB_SSH_BASE_URL = 'git@github.com:'
GITHUB_HTTPS_BASE_URL = 'https://github.com/'
if __name__ == "__main__":
    # Ordered list of update steps; each entry is a zero-argument callable
    # that returns the output to print.
    # NOTE(review): intro_msg and clone_go_buffer_repo are not defined in
    # this chunk — presumably defined earlier in the file; confirm.
    UPDATE_QUEUE = [
        intro_msg,
        clone_go_buffer_repo,
    ]
    for update_action in UPDATE_QUEUE:
        std_out = update_action()
        print(std_out)
| [
2,
48443,
8800,
14,
24330,
21015,
198,
198,
2,
973,
284,
4296,
477,
1957,
290,
6569,
10288,
656,
428,
9874,
9483,
198,
11748,
850,
14681,
198,
198,
38,
10554,
10526,
62,
5432,
39,
62,
33,
11159,
62,
21886,
796,
705,
18300,
31,
12567... | 2.321637 | 171 |
from __future__ import division
import torch
class PenaltyBuilder(object):
"""Returns the Length and Coverage Penalty function for Beam Search.
Args:
length_pen (str): option name of length pen
cov_pen (str): option name of cov pen
Attributes:
has_cov_pen (bool): Whether coverage penalty is None (applying it
is a no-op). Note that the converse isn't true. Setting beta
to 0 should force coverage length to be a no-op.
has_len_pen (bool): Whether length penalty is None (applying it
is a no-op). Note that the converse isn't true. Setting alpha
to 1 should force length penalty to be a no-op.
coverage_penalty (callable[[torch.FloatTensor, float],
torch.FloatTensor]): Calculates the coverage penalty.
length_penalty (callable[[int, float], float]): Calculates
the length penalty.
"""
@staticmethod
# Below are all the different penalty terms implemented so far.
# Subtract coverage penalty from topk log probs.
# Divide topk log probs by length penalty.
def coverage_wu(self, cov, beta=0.):
"""GNMT coverage re-ranking score.
See "Google's Neural Machine Translation System" :cite:`wu2016google`.
``cov`` is expected to be sized ``(*, seq_len)``, where ``*`` is
probably ``batch_size x beam_size`` but could be several
dimensions like ``batch_size, beam_size``. If ``cov`` is attention,
then the ``seq_len`` axis probably sums to (almost) 1.
"""
penalty = -torch.min(cov, cov.clone().fill_(1.0)).log().sum(-1)
return beta * penalty
def coverage_summary(self, cov, beta=0.):
"""Our summary penalty."""
penalty = torch.max(cov, cov.clone().fill_(1.0)).sum(-1)
penalty -= cov.size(-1)
return beta * penalty
def coverage_none(self, cov, beta=0.):
"""Returns zero as penalty"""
none = torch.zeros((1,), device=cov.device,
dtype=torch.float)
if cov.dim() == 3:
none = none.unsqueeze(0)
return none
def length_wu(self, cur_len, alpha=0.):
"""GNMT length re-ranking score.
See "Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
return ((5 + cur_len) / 6.0) ** alpha
def length_average(self, cur_len, alpha=0.):
"""Returns the current sequence length."""
return cur_len
def length_none(self, cur_len, alpha=0.):
"""Returns unmodified scores."""
return 1.0
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
11748,
28034,
628,
198,
4871,
41676,
32875,
7,
15252,
2599,
198,
220,
220,
220,
37227,
35561,
262,
22313,
290,
33998,
41676,
2163,
329,
25855,
11140,
13,
628,
220,
220,
220,
943,
14542,
25,
198,... | 2.51938 | 1,032 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import llnl.util.tty as tty
import spack.cmd
import spack.cmd.common.arguments as arguments
import spack.environment as ev
description = 'remove specs from an environment'
section = "environments"
level = "long"
| [
2,
15069,
2211,
12,
1238,
2481,
13914,
45036,
3549,
2351,
4765,
11,
11419,
290,
584,
198,
2,
1338,
441,
4935,
34152,
13,
4091,
262,
1353,
12,
5715,
27975,
38162,
9947,
2393,
329,
3307,
13,
198,
2,
198,
2,
30628,
55,
12,
34156,
12,
... | 3.393443 | 122 |
# Copyright (C) 2008-2011 Oracle Corporation
#
# This file is part of a free software library; you can redistribute
# it and/or modify it under the terms of the GNU Lesser General
# Public License version 2.1 as published by the Free Software
# Foundation and shipped in the "COPYING.LIB" file with this library.
# The library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY of any kind.
#
# Oracle LGPL Disclaimer: For the avoidance of doubt, except that if
# any license choice other than GPL or LGPL is available it will
# apply instead, Oracle elects to use only the Lesser General Public
# License version 2.1 (LGPLv2) at this time for any software where
# a choice of LGPL license versions is made available with the
# language indicating that LGPLv2 or any later version may be used,
# or where a choice of which version of the LGPL is applied is
# otherwise unspecified.
#
# This file is autogenerated from VirtualBox.xidl, DO NOT EDIT!
#
from VirtualBox_services import *
try:
from VirtualBox_client import *
except:
pass
import base64
| [
2,
15069,
357,
34,
8,
3648,
12,
9804,
18650,
10501,
198,
2,
198,
2,
770,
2393,
318,
636,
286,
257,
1479,
3788,
5888,
26,
345,
460,
17678,
4163,
198,
2,
340,
290,
14,
273,
13096,
340,
739,
262,
2846,
286,
262,
22961,
12892,
263,
... | 3.506211 | 322 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-25 15:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
37770,
352,
13,
940,
13,
18,
319,
1584,
12,
1065,
12,
1495,
1315,
25,
1433,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
... | 2.73913 | 69 |
import os
import sys
import smtplib
import socket
import urllib2
import datetime
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.multipart import MIMEMultipart
from alerta.common import log as logging
from alerta.common import config
LOG = logging.getLogger(__name__)
CONF = config.CONF
| [
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
895,
83,
489,
571,
198,
11748,
17802,
198,
11748,
2956,
297,
571,
17,
198,
11748,
4818,
8079,
198,
198,
6738,
3053,
13,
76,
524,
13,
5239,
1330,
337,
3955,
2767,
2302,
198,
6738,
3053,... | 3.026786 | 112 |
import numpy as np
import time
from hmsolver.meshgrid import Zone2d
from hmsolver.meshgrid import Mesh2d
from hmsolver.material import Material2d
from hmsolver.femcore import point_criteria
from hmsolver.femcore import segment_criteria
from hmsolver.femcore import boundary_cond2d as _bc_ # abbreviation
from hmsolver.femcore import BoundaryConds2d
from hmsolver.app import Simulation2d
from hmsolver.basis import Quad4Node
from hmsolver.utils import formatting_time
if __name__ == '__main__':
t0 = time.time() # tic
# 几何区域
zone_xl, zone_xr = 0, 1
zone_yl, zone_yr = 0, 1
zone_xmid = 0.5 * (zone_xl + zone_xr)
zone_ymid = 0.5 * (zone_yl + zone_yr)
grid_num, grid_size = 50, 0.02
zone = Zone2d(zone_xl, zone_xr, zone_yl, zone_yr)
mesh2d = zone.meshgrid_zone_safe(Mesh2d, grid_num)
n_nodes, n_elements = mesh2d.n_nodes, mesh2d.n_elements
# 输出网格基本信息
print(f"Mesh contains {n_nodes} nodes and {n_elements} elements.")
print(f"Average Grid Size= {grid_size:.8f}")
# 建立材料实例
material2d = Material2d(300, 0.25)
# 边界条件
stretch = 0.02
boundary_0 = point_criteria(zone_xmid, zone_yl)
boundary_1 = segment_criteria(zone_xl, zone_yl, zone_xr, zone_yl)
boundary_2 = segment_criteria(zone_xl, zone_yr, zone_xr, zone_yr)
boundarys = BoundaryConds2d(
_bc_("point", boundary_0, "fixed", None, None),
_bc_("segment", boundary_1, "set_uy", "constant", 0),
_bc_("segment", boundary_2, "set_uy", "constant", +stretch))
boundarys.manually_verify()
# 建立模拟实例
app = Simulation2d(mesh2d, material2d, boundarys)
app.app_name = "example-01-plate-unixial-tensile"
app.apply_basis(Quad4Node())
app.parallelized = True # 开启并行多线程
app.check_engine()
# 输出模拟结果
app.export_to_tecplot("elasticity", *app.provied_solutions)
print(f"Total time cost: {formatting_time(time.time() - t0)}") | [
11748,
299,
32152,
355,
45941,
198,
11748,
640,
198,
198,
6738,
289,
907,
14375,
13,
76,
5069,
25928,
1330,
13035,
17,
67,
198,
6738,
289,
907,
14375,
13,
76,
5069,
25928,
1330,
47529,
17,
67,
198,
6738,
289,
907,
14375,
13,
33665,
... | 2.120133 | 899 |
#
# PySNMP MIB module RUCKUS-ZD-WLAN-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RUCKUS-ZD-WLAN-MIB
# Produced by pysmi-0.3.4 at Wed May 1 14:59:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
IANAifType, = mibBuilder.importSymbols("IANAifType-MIB", "IANAifType")
ifIndex, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "ifIndex", "InterfaceIndex")
ruckusZDWLANModule, = mibBuilder.importSymbols("RUCKUS-ROOT-MIB", "ruckusZDWLANModule")
RuckusRadioMode, RuckusAdminStatus, RuckusSSID, RuckusRateLimiting, RuckusdB = mibBuilder.importSymbols("RUCKUS-TC-MIB", "RuckusRadioMode", "RuckusAdminStatus", "RuckusSSID", "RuckusRateLimiting", "RuckusdB")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
TimeTicks, Unsigned32, IpAddress, Gauge32, ObjectIdentity, Counter32, Integer32, iso, ModuleIdentity, MibIdentifier, Bits, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "TimeTicks", "Unsigned32", "IpAddress", "Gauge32", "ObjectIdentity", "Counter32", "Integer32", "iso", "ModuleIdentity", "MibIdentifier", "Bits", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64")
TextualConvention, DisplayString, TruthValue, PhysAddress, RowStatus, MacAddress = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "TruthValue", "PhysAddress", "RowStatus", "MacAddress")
# Module identity for RUCKUS-ZD-WLAN-MIB, registered under the Ruckus
# Wireless enterprise arc 1.3.6.1.4.1.25053 (see contact info below).
# NOTE(review): `mibBuilder` is not defined in this file — presumably it is
# injected into the module namespace by the pysnmp MIB loader; verify before
# attempting to run this module standalone.
ruckusZDWLANMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1))
# The textual metadata below is only attached when the loader was configured
# to load MIB texts (mibBuilder.loadTexts); otherwise these lines are no-ops.
if mibBuilder.loadTexts: ruckusZDWLANMIB.setLastUpdated('201010150800Z')
if mibBuilder.loadTexts: ruckusZDWLANMIB.setOrganization('Ruckus Wireless, Inc.')
if mibBuilder.loadTexts: ruckusZDWLANMIB.setContactInfo('Ruckus Wireless, Inc. Postal: 880 W Maude Ave Sunnyvale, CA 94085 USA EMail: support@ruckuswireless.com Phone: +1-650-265-4200')
if mibBuilder.loadTexts: ruckusZDWLANMIB.setDescription('Ruckus ZD WLAN mib')
# ---------------------------------------------------------------------------
# Per-WLAN statistics table (ruckusZDWLANTable).
# One conceptual row per WLAN on the ZoneDirector, indexed by the standard
# IF-MIB ifIndex.  Every column is registered read-only.
# ---------------------------------------------------------------------------
ruckusZDWLANObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1))
ruckusZDWLANInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1))
ruckusZDWLANTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1), )
if mibBuilder.loadTexts: ruckusZDWLANTable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANTable.setDescription('ZD WLAN table.')
# Row definition: external index into IF-MIB's ifIndex (the leading 0 means
# the index is not implied/truncated).
ruckusZDWLANEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ruckusZDWLANEntry.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANEntry.setDescription('Specifies each ZD WLAN entry.')
# --- Columns 1-16: WLAN configuration and aggregate traffic counters -------
ruckusZDWLANSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 1), RuckusSSID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANSSID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANSSID.setDescription('SSID.')
ruckusZDWLANDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANDescription.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANDescription.setDescription('WLAN description.')
ruckusZDWLANAuthentication = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAuthentication.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAuthentication.setDescription('Authentication method.')
ruckusZDWLANEncryption = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANEncryption.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANEncryption.setDescription('Encryption method.')
ruckusZDWLANIsGuest = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 5), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANIsGuest.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANIsGuest.setDescription('This WLAN is for guest access.')
ruckusZDWLANSSIDBcastDisable = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANSSIDBcastDisable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANSSIDBcastDisable.setDescription('Hide SSID.')
# VLAN ID is range-constrained to the valid 802.1Q range 1..4094.
ruckusZDWLANVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVlanID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVlanID.setDescription('Specifies the VLAN ID of the WLAN. If VLAN ID is 1, packets from this WLAN will be untagged.')
# Rate limits are free-form octet strings (1..10 bytes), not numeric values.
ruckusZDWLANRateLimitingUp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 8), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRateLimitingUp.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRateLimitingUp.setDescription('Rate limiting of uplink.')
ruckusZDWLANRateLimitingDown = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRateLimitingDown.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRateLimitingDown.setDescription('Rate limiting of downlink.')
ruckusZDWLANTunnelWLAN = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 10), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANTunnelWLAN.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANTunnelWLAN.setDescription('Tunnel WLAN.')
ruckusZDWLANNumVAP = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANNumVAP.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANNumVAP.setDescription('Number of APs.')
ruckusZDWLANNumSta = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANNumSta.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANNumSta.setDescription('Number of client devices.')
ruckusZDWLANRxPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRxPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRxPkts.setDescription('Received packets.')
ruckusZDWLANRxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRxBytes.setDescription('Received bytes.')
ruckusZDWLANTxPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANTxPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANTxPkts.setDescription('Transmitted packets.')
ruckusZDWLANTxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANTxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANTxBytes.setDescription('Transmitted bytes.')
# --- Columns 26-40: 802.11 authentication/association event counters -------
# (column sub-identifiers 17-25 are not defined in this chunk)
ruckusZDWLANAuthTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 26), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAuthTotal.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAuthTotal.setDescription('Accumulated total number of attempt to authenticate by wireless terminals on this WLAN, or Authentication request.')
ruckusZDWLANAuthResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 27), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAuthResp.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAuthResp.setDescription('Authentication response.')
ruckusZDWLANAuthSuccessTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 28), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAuthSuccessTotal.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAuthSuccessTotal.setDescription('Accumulated total number of success out of Wireless authentication attempts.')
ruckusZDWLANAuthFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 29), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAuthFail.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAuthFail.setDescription('Authentication fail.')
ruckusZDWLANAssocTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 30), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAssocTotal.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAssocTotal.setDescription('Accumulated total number of access of wireless terminal on this WLAN, or Association request.')
ruckusZDWLANAssocResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 31), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAssocResp.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAssocResp.setDescription('Association response.')
ruckusZDWLANReassocReq = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 32), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANReassocReq.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANReassocReq.setDescription('Reassociation request.')
ruckusZDWLANReassocResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 33), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANReassocResp.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANReassocResp.setDescription('Reassociation response.')
ruckusZDWLANAssocSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 34), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAssocSuccess.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAssocSuccess.setDescription('Association success.')
ruckusZDWLANAssocFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 35), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAssocFail.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAssocFail.setDescription('Association fail.')
ruckusZDWLANAssocDenied = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 36), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAssocDenied.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAssocDenied.setDescription('Association denied.')
ruckusZDWLANDiassocAbnormal = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 37), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANDiassocAbnormal.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANDiassocAbnormal.setDescription('Disassociation request due to abnormal causes.')
ruckusZDWLANDiassocCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 38), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANDiassocCapacity.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANDiassocCapacity.setDescription('Disassociation request due to device capacity.')
ruckusZDWLANDiassocLeave = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 39), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANDiassocLeave.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANDiassocLeave.setDescription('Disassociation request due to client leave.')
ruckusZDWLANDiassocMisc = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 40), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANDiassocMisc.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANDiassocMisc.setDescription('Disassociation request due to other reasons.')
# --- Columns 41-52: rate and LAN/WLAN frame statistics ----------------------
ruckusZDWLANRxByteRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 41), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRxByteRate.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRxByteRate.setDescription('Received rate byte/s in last 5 minutes .')
ruckusZDWLANTxByteRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 42), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANTxByteRate.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANTxByteRate.setDescription('Transmitted rate byte/s in last 5 minutes .')
ruckusZDWLANRxDataFrameOnLan = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 43), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRxDataFrameOnLan.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRxDataFrameOnLan.setDescription('Received Data Frame Number on Lan interface .')
ruckusZDWLANRxByteOnLan = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 44), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRxByteOnLan.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRxByteOnLan.setDescription('Received Data Bytes on Lan interface .')
ruckusZDWLANTxByteOnLan = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 45), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANTxByteOnLan.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANTxByteOnLan.setDescription('Transmitted Data Bytes on Lan interface .')
ruckusZDWLANDownDropFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 46), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANDownDropFrame.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANDownDropFrame.setDescription('Drop Frame Number on Downlink of Wireless.')
ruckusZDWLANDownRetxFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 47), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANDownRetxFrame.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANDownRetxFrame.setDescription('Retransmitted Frame Number on Downlink of Wireless.')
ruckusZDWLANDownTotalFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 48), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANDownTotalFrame.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANDownTotalFrame.setDescription('Total Frame Number on Downlink of Wireless.')
ruckusZDWLANDownTotalErrFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 49), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANDownTotalErrFrame.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANDownTotalErrFrame.setDescription('Total Error Frame Number on Downlink of Wireless.')
ruckusZDWLANUpTotalFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 50), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANUpTotalFrame.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANUpTotalFrame.setDescription('Total Frame Number on Uplink of Wireless.')
ruckusZDWLANUpDropFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 51), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANUpDropFrame.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANUpDropFrame.setDescription('Drop Frame Number on Uplink of Wireless.')
ruckusZDWLANUpRetxFrame = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 52), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANUpRetxFrame.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANUpRetxFrame.setDescription('Retransmitted Frame Number on Uplink of Wireless.')
# --- Column 108: WLAN name (sub-identifiers 53-107 not defined here) --------
ruckusZDWLANNAME = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 1, 1, 1, 108), RuckusSSID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANNAME.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANNAME.setDescription('WLAN NAME.')
# ---------------------------------------------------------------------------
# Per-AP table (ruckusZDWLANAPTable).
# One conceptual row per access point known to the ZoneDirector, indexed by
# the AP's MAC address (ruckusZDWLANAPMacAddr, a local index defined in this
# same MIB).  Every column is registered read-only.
# NOTE(review): the table's column list continues beyond the end of this
# chunk (sub-identifier 32 is the last one visible here).
# ---------------------------------------------------------------------------
ruckusZDWLANAPInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2))
ruckusZDWLANAPTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1), )
if mibBuilder.loadTexts: ruckusZDWLANAPTable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPTable.setDescription('ZD WLAN AP table.')
ruckusZDWLANAPEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1), ).setIndexNames((0, "RUCKUS-ZD-WLAN-MIB", "ruckusZDWLANAPMacAddr"))
if mibBuilder.loadTexts: ruckusZDWLANAPEntry.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEntry.setDescription('Specifies each ZD WLAN AP entry.')
# --- Columns 1-8: AP identity, status and versions --------------------------
ruckusZDWLANAPMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMacAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMacAddr.setDescription('MAC address.')
ruckusZDWLANAPDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPDescription.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPDescription.setDescription('Description.')
# Enumerated ZD-connection state: 0..4 with the named values listed below.
ruckusZDWLANAPStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("disconnected", 0), ("connected", 1), ("approvalPending", 2), ("upgradingFirmware", 3), ("provisioning", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPStatus.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPStatus.setDescription('The connection status with ZD.')
ruckusZDWLANAPModel = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 4), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPModel.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPModel.setDescription('Model name.')
ruckusZDWLANAPSerialNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPSerialNumber.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPSerialNumber.setDescription('Serial number.')
ruckusZDWLANAPUptime = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 6), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPUptime.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPUptime.setDescription('Up time.')
ruckusZDWLANAPSWversion = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPSWversion.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPSWversion.setDescription('Software version.')
ruckusZDWLANAPHWversion = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 8), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPHWversion.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPHWversion.setDescription('Hardware version.')
# --- Columns 10-20: addressing, counts and mesh state -----------------------
# (sub-identifiers 9, 11 and 12 are not defined in this chunk)
ruckusZDWLANAPIPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 10), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIPAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIPAddr.setDescription('IP address.')
ruckusZDWLANAPNumRadios = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 13), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPNumRadios.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPNumRadios.setDescription('Number of radios.')
ruckusZDWLANAPNumVAP = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 14), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPNumVAP.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPNumVAP.setDescription('Number of APs.')
ruckusZDWLANAPNumSta = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 15), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPNumSta.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPNumSta.setDescription('Total number of authenticated terminal which is using currently on this AP.')
ruckusZDWLANAPNumRogues = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 16), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPNumRogues.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPNumRogues.setDescription('Number of rogue devices.')
ruckusZDWLANAPConnectionMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("layer2", 0), ("layer3", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPConnectionMode.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPConnectionMode.setDescription('The connection mode with ZD.')
ruckusZDWLANAPMeshEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMeshEnable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMeshEnable.setDescription('Mesh enable.')
ruckusZDWLANAPMeshHops = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 19), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMeshHops.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMeshHops.setDescription('Number of mesh hops.')
ruckusZDWLANAPMeshType = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("unknown", 0), ("root", 1), ("mesh", 2), ("forming", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMeshType.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMeshType.setDescription('Mesh type.')
# --- Columns 21-26: LAN-port packet/byte counters (32-bit) ------------------
ruckusZDWLANAPLANStatsRXByte = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXByte.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXByte.setDescription('Bytes received on LAN port.')
ruckusZDWLANAPLANStatsRXPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPkt.setDescription('Packets received on LAN port.')
ruckusZDWLANAPLANStatsRXPktErr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPktErr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPktErr.setDescription('Packets received erroneously on LAN port.')
ruckusZDWLANAPLANStatsRXPKTSucc = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPKTSucc.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPKTSucc.setDescription('Packets received successfully on LAN port.')
ruckusZDWLANAPLANStatsTXByte = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXByte.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXByte.setDescription('Bytes transmitted on LAN port.')
ruckusZDWLANAPLANStatsTXPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXPkt.setDescription('Packets transmitted on LAN port.')
# --- Columns 27-32: resource utilization and capabilities -------------------
ruckusZDWLANAPMemUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 27), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMemUtil.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMemUtil.setDescription('Memory utilization percentage.')
ruckusZDWLANAPMemTotal = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 28), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMemTotal.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMemTotal.setDescription('Total memory in KB.')
ruckusZDWLANAPCPUUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPCPUUtil.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPCPUUtil.setDescription('CPU utilization percentage.')
ruckusZDWLANAPFWSize = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 30), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPFWSize.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPFWSize.setDescription('Firmware partition size in byte.')
ruckusZDWLANAPFWAvail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 31), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPFWAvail.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPFWAvail.setDescription('Firmware partition available size in byte.')
ruckusZDWLANAPMultipleVlanCapability = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 32), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMultipleVlanCapability.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMultipleVlanCapability.setDescription('AP support multiple vlan capability.')
ruckusZDWLANAP11bCapable = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 36), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAP11bCapable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAP11bCapable.setDescription('Support 802.11b.')
ruckusZDWLANAP11gCapable = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 37), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAP11gCapable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAP11gCapable.setDescription('Support 802.11g.')
# Whether the AP accepts 11a/b/g clients at the same time.  Unlike most
# columns in this table this one is declared read-write.
ruckusZDWLANAPMultiModeAccessStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 38), TruthValue().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("false", 0), ("true", 1)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusZDWLANAPMultiModeAccessStatus.setStatus('current')
# DESCRIPTION text fixed: "simutaneously connections" -> "simultaneous connections".
if mibBuilder.loadTexts: ruckusZDWLANAPMultiModeAccessStatus.setDescription('Support simultaneous connections from 11a/b/g clients.')
# Administrative/lifecycle columns (sub-ids 39-46) and extended LAN-port
# packet breakdowns (sub-ids 47-55) of ruckusZDWLANAPTable.
ruckusZDWLANAPEthStateChange = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 39), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPEthStateChange.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthStateChange.setDescription('Ethernet port up/down count.')
# NOTE(review): SyncConf/Upgrade describe "push"/"upgrade" actions but are
# declared read-only here — presumably triggered elsewhere; confirm against
# the published MIB before relying on them as write controls.
ruckusZDWLANAPSyncConf = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 41), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPSyncConf.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPSyncConf.setDescription('Manually push configuration to this AP.')
ruckusZDWLANAPUpgrade = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 42), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPUpgrade.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPUpgrade.setDescription('Upgrade this AP.')
# Timestamps are exposed as free-form DisplayString values.
ruckusZDWLANAPFirstJoinTime = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 43), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPFirstJoinTime.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPFirstJoinTime.setDescription('Time of first join.')
ruckusZDWLANAPLastBootTime = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 44), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLastBootTime.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLastBootTime.setDescription('Time of last boot.')
ruckusZDWLANAPLastUpgradeTime = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 45), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLastUpgradeTime.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLastUpgradeTime.setDescription('Time of last firmware upgrade.')
ruckusZDWLANAPLastConfSyncTime = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 46), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLastConfSyncTime.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLastConfSyncTime.setDescription('Time of last configuration synchronization.')
# Per-cast-type LAN packet counters: broadcast/multicast/unicast, RX then TX.
ruckusZDWLANAPLANStatsRXPKTBcast = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 47), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPKTBcast.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPKTBcast.setDescription('Broadcast packets received successfully on LAN port.')
ruckusZDWLANAPLANStatsRXPKTMcast = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 48), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPKTMcast.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPKTMcast.setDescription('Multicast packets received successfully on LAN port.')
ruckusZDWLANAPLANStatsRXPKTUcast = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 49), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPKTUcast.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXPKTUcast.setDescription('Unicast packets received successfully on LAN port.')
ruckusZDWLANAPLANStatsTXPKTBcast = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 50), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXPKTBcast.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXPKTBcast.setDescription('Broadcast packets transmitted successfully on LAN port.')
ruckusZDWLANAPLANStatsTXPKTMcast = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 51), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXPKTMcast.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXPKTMcast.setDescription('Multicast packets transmitted successfully on LAN port.')
ruckusZDWLANAPLANStatsTXPKTUcast = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 52), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXPKTUcast.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXPKTUcast.setDescription('Unicast packets transmitted successfully on LAN port.')
ruckusZDWLANAPLANStatsDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 53), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsDropped.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsDropped.setDescription('Number of packets dropped on LAN port.')
# Mesh-link flap counters for this AP's uplink/downlink roles.
ruckusZDWLANAPMeshUpPortCntUpdown = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 54), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMeshUpPortCntUpdown.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMeshUpPortCntUpdown.setDescription('updown count of AP as a up port node in mesh mode.')
ruckusZDWLANAPMeshDownPortCntUpdown = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 55), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMeshDownPortCntUpdown.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMeshDownPortCntUpdown.setDescription('updown count of AP as a down port node in mesh mode.')
# Count of frames dropped on the AP's wireless transmit path.
ruckusZDWLANAPTxFrameDropped = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 57), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPTxFrameDropped.setStatus('current')
# DESCRIPTION text fixed: "Droped" -> "Dropped".
if mibBuilder.loadTexts: ruckusZDWLANAPTxFrameDropped.setDescription('Dropped Frame count of AP wireless transmitted.')
# Wireless TX error counter, coverage classification, per-station traffic
# totals, and L3 addressing columns of ruckusZDWLANAPTable.
ruckusZDWLANAPTxFrameError = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 58), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPTxFrameError.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPTxFrameError.setDescription('Error Frame count of AP wireless transmitted.')
ruckusZDWLANAPCoverageTech = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 59), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("indoor", 1), ("indoor-distribute", 2), ("outdoor", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPCoverageTech.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPCoverageTech.setDescription("AP's coverage technology.")
# Aggregate client traffic through this AP (kilobytes, per DESCRIPTION).
ruckusZDWLANAPStaTxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 61), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPStaTxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPStaTxBytes.setDescription('Client send data to this AP.(kbytes) ')
ruckusZDWLANAPStaRxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 62), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPStaRxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPStaRxBytes.setDescription('Client receive data from this AP.(kbytes)')
# IP configuration columns occupy the high sub-id range 100+.
ruckusZDWLANAPNetmask = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 100), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPNetmask.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPNetmask.setDescription('Netmask address.')
ruckusZDWLANAPGateway = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 101), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPGateway.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPGateway.setDescription('Gateway address.')
ruckusZDWLANAPDNS1 = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 105), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPDNS1.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPDNS1.setDescription('Primary DNS address.')
# Secondary DNS server address (counterpart of ruckusZDWLANAPDNS1).
ruckusZDWLANAPDNS2 = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 106), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPDNS2.setStatus('current')
# DESCRIPTION text fixed: "Sencond" -> "Secondary", matching DNS1's 'Primary DNS address.'.
if mibBuilder.loadTexts: ruckusZDWLANAPDNS2.setDescription('Secondary DNS address.')
# Final ruckusZDWLANAPTable columns: concurrent-user count and 90-second
# LAN byte-rate samples.
ruckusZDWLANAPTotalUser = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 110), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPTotalUser.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPTotalUser.setDescription('Total number of concurrent users on this AP.')
ruckusZDWLANAPLANStatsRXByteRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 111), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXByteRate.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsRXByteRate.setDescription('Bytes rate received on LAN port each 90s.')
ruckusZDWLANAPLANStatsTXByteRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 1, 1, 112), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXByteRate.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPLANStatsTXByteRate.setDescription('Bytes rate transmitted on LAN port each 90s.')
# --- ruckusZDWLANAPRadioStatsTable ----------------------------------------
# Per-radio statistics, indexed by (AP MAC address, radio index).  Columns
# live under OID 1.3.6.1.4.1.25053.1.2.2.1.1.2.2.1.<sub-id>.
ruckusZDWLANAPRadioStatsTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2), )
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTable.setDescription('ZD WLAN AP Radio table.')
ruckusZDWLANAPRadioStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1), ).setIndexNames((0, "RUCKUS-ZD-WLAN-MIB", "ruckusZDWLANAPRadioStatsAPMacAddr"), (0, "RUCKUS-ZD-WLAN-MIB", "ruckusZDWLANAPRadioStatsRadioIndex"))
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsEntry.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsEntry.setDescription('Specifies each ZD WLAN AP Radio entry.')
# Index columns (1-2).
ruckusZDWLANAPRadioStatsAPMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAPMacAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAPMacAddr.setDescription('AP MAC address.')
ruckusZDWLANAPRadioStatsRadioIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 2), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRadioIndex.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRadioIndex.setDescription('Radio index of the AP.')
# Radio configuration columns (3-8).
ruckusZDWLANAPRadioStatsRadioType = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("radio11bg", 0), ("radio11a", 1), ("radio11ng", 2), ("radio11na", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRadioType.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRadioType.setDescription('Radio type.')
ruckusZDWLANAPRadioStatsChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsChannel.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsChannel.setDescription('Radio channel.')
ruckusZDWLANAPRadioStatsTxPower = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("full", 0), ("half", 1), ("quarter", 2), ("eighth", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxPower.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxPower.setDescription('Transmit power of radio.')
ruckusZDWLANAPRadioStatsMeshEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 6), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsMeshEnable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsMeshEnable.setDescription('Mesh enable.')
ruckusZDWLANAPRadioStatsNumVAP = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsNumVAP.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsNumVAP.setDescription('Number of APs.')
ruckusZDWLANAPRadioStatsNumSta = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 8), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsNumSta.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsNumSta.setDescription('Number of stations.')
# Traffic counters (9-17): 64-bit counters for packet/byte totals.
ruckusZDWLANAPRadioStatsAvgStaRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAvgStaRSSI.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAvgStaRSSI.setDescription('Average client RSSI.')
ruckusZDWLANAPRadioStatsRxPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRxPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRxPkts.setDescription('Received packets.')
ruckusZDWLANAPRadioStatsRxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRxBytes.setDescription('Received bytes.')
ruckusZDWLANAPRadioStatsRxMulticast = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRxMulticast.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRxMulticast.setDescription('Received multicast packets.')
ruckusZDWLANAPRadioStatsTxPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxPkts.setDescription('Transmitted packets.')
ruckusZDWLANAPRadioStatsTxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxBytes.setDescription('Transmitted bytes.')
ruckusZDWLANAPRadioStatsTxMulticast = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxMulticast.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxMulticast.setDescription('Transmitted multicast packets.')
ruckusZDWLANAPRadioStatsTxFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxFail.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxFail.setDescription('Transmitted fail packets.')
ruckusZDWLANAPRadioStatsTxRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxRetries.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxRetries.setDescription('Transmitted retry packets.')
ruckusZDWLANAPRadioStatsPowerMgmt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 18), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsPowerMgmt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsPowerMgmt.setDescription('Power management enable.')
# MaxSta is the only read-write column in this table.
ruckusZDWLANAPRadioStatsMaxSta = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 19), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsMaxSta.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsMaxSta.setDescription('Number of max stations allowed.')
# Rates expressed in units of 1/10000 (see setUnits).
ruckusZDWLANAPRadioStatsFrameErrorRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 20), Unsigned32()).setUnits('1/10000').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsFrameErrorRate.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsFrameErrorRate.setDescription('Frame error rate.')
ruckusZDWLANAPRadioStatsFrameRetryRate = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 21), Unsigned32()).setUnits('1/10000').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsFrameRetryRate.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsFrameRetryRate.setDescription('Frame retry rate.')
ruckusZDWLANAPRadioStatsMonitoredTime = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 22), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsMonitoredTime.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsMonitoredTime.setDescription('Monitored time.')
# 802.11 management-frame counters (24-39): auth/assoc/reassoc/disassoc.
ruckusZDWLANAPRadioStatsTotalAssocTime = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 24), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTotalAssocTime.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTotalAssocTime.setDescription('Client total association time.')
ruckusZDWLANAPRadioStatsAuthReq = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 25), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAuthReq.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAuthReq.setDescription('Authentication request.')
ruckusZDWLANAPRadioStatsAuthResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 26), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAuthResp.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAuthResp.setDescription('Authentication response.')
ruckusZDWLANAPRadioStatsAuthSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 27), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAuthSuccess.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAuthSuccess.setDescription('Authentication success.')
ruckusZDWLANAPRadioStatsAuthFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 28), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAuthFail.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAuthFail.setDescription('Authentication fail.')
ruckusZDWLANAPRadioStatsAssocReq = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 29), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocReq.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocReq.setDescription('Association request.')
ruckusZDWLANAPRadioStatsAssocResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 30), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocResp.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocResp.setDescription('Association response.')
ruckusZDWLANAPRadioStatsReassocReq = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 31), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsReassocReq.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsReassocReq.setDescription('Reassociation request.')
ruckusZDWLANAPRadioStatsReassocResp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 32), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsReassocResp.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsReassocResp.setDescription('Reassociation response.')
ruckusZDWLANAPRadioStatsAssocSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 33), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocSuccess.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocSuccess.setDescription('Association success.')
ruckusZDWLANAPRadioStatsAssocFail = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 34), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocFail.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocFail.setDescription('Association fail.')
ruckusZDWLANAPRadioStatsAssocDenied = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 35), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocDenied.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAssocDenied.setDescription('Association denied.')
ruckusZDWLANAPRadioStatsDiassocAbnormal = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 36), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsDiassocAbnormal.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsDiassocAbnormal.setDescription('Disassociation request due to abnormal causes.')
ruckusZDWLANAPRadioStatsDiassocCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 37), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsDiassocCapacity.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsDiassocCapacity.setDescription('Disassociation request due to device capacity.')
ruckusZDWLANAPRadioStatsDiassocLeave = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 38), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsDiassocLeave.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsDiassocLeave.setDescription('Disassociation request due to client leave.')
ruckusZDWLANAPRadioStatsDiassocMisc = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 39), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsDiassocMisc.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsDiassocMisc.setDescription('Disassociation request due to other reasons.')
# Utilization, signal-frame counters and radio parameters (40-47).
ruckusZDWLANAPRadioStatsResourceUtil = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 40), Unsigned32()).setUnits('percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsResourceUtil.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsResourceUtil.setDescription('Resource utilization.')
ruckusZDWLANAPRadioStatsRxSignalFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 41), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRxSignalFrm.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRxSignalFrm.setDescription('Received signal frames.')
ruckusZDWLANAPRadioStatsTxSignalFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 42), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxSignalFrm.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTxSignalFrm.setDescription('Transmitted signal frames.')
ruckusZDWLANAPRadioStatsTotalSignalFrm = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 43), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTotalSignalFrm.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsTotalSignalFrm.setDescription('Total signal frames.')
ruckusZDWLANAPRadioStatsAntennaGain = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 44), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAntennaGain.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsAntennaGain.setDescription('Get antenna gain.')
ruckusZDWLANAPRadioStatsBeaconPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 45), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsBeaconPeriod.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsBeaconPeriod.setDescription('Get beacon period.')
ruckusZDWLANAPRadioStatsRTSThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 46), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRTSThreshold.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsRTSThreshold.setDescription('Get RTS threshold.')
ruckusZDWLANAPRadioStatsFragThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 2, 1, 47), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsFragThreshold.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPRadioStatsFragThreshold.setDescription('Get fragment threshold.')
# --- ruckusZDWLANStaTable -------------------------------------------------
# Per-station (client) table, indexed by station MAC address.  Columns live
# under OID 1.3.6.1.4.1.25053.1.2.2.1.1.3.1.1.<sub-id>.
ruckusZDWLANStaInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3))
ruckusZDWLANStaTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1), )
if mibBuilder.loadTexts: ruckusZDWLANStaTable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaTable.setDescription('ZD WLAN Station table.')
ruckusZDWLANStaEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1), ).setIndexNames((0, "RUCKUS-ZD-WLAN-MIB", "ruckusZDWLANStaMacAddr"))
if mibBuilder.loadTexts: ruckusZDWLANStaEntry.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaEntry.setDescription('Specifies each ZD WLAN Station entry.')
# Identity columns (1-5): station MAC (index), associated AP, BSSID, SSID,
# logged-in user name.
ruckusZDWLANStaMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaMacAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaMacAddr.setDescription("The station's MAC Address.")
ruckusZDWLANStaAPMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaAPMacAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaAPMacAddr.setDescription('The MAC address of the associated AP.')
ruckusZDWLANStaBSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 3), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaBSSID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaBSSID.setDescription('BSSID of WLAN.')
ruckusZDWLANStaSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 4), RuckusSSID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaSSID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaSSID.setDescription('SSID.')
ruckusZDWLANStaUser = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaUser.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaUser.setDescription('Logined username of the client.')
# Link columns (6-9): radio type, channel, IP, averaged RSSI.  Note this
# radio-type enum differs from the AP radio table's (11a is 0 here).
ruckusZDWLANStaRadioType = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4))).clone(namedValues=NamedValues(("radio11a", 0), ("radio11b", 1), ("radio11g", 2), ("radio11ng", 3), ("radio11na", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaRadioType.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaRadioType.setDescription('Radio type.')
ruckusZDWLANStaChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 7), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaChannel.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaChannel.setDescription('Radio channel.')
ruckusZDWLANStaIPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 8), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaIPAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaIPAddr.setDescription('IP address.')
ruckusZDWLANStaAvgRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaAvgRSSI.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaAvgRSSI.setDescription('Averaged RSSI.')
# Per-station traffic counters (10-17): packet counts are 32-bit, byte
# counts are 64-bit.
ruckusZDWLANStaRxPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaRxPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaRxPkts.setDescription('Received packets.')
ruckusZDWLANStaRxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaRxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaRxBytes.setDescription('Received bytes.')
ruckusZDWLANStaTxPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaTxPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaTxPkts.setDescription('Transmitted packets.')
ruckusZDWLANStaTxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaTxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaTxBytes.setDescription('Transmitted bytes.')
ruckusZDWLANStaRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaRetries.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaRetries.setDescription('Transmitted retries.')
ruckusZDWLANStaAssocTime = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 15), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaAssocTime.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaAssocTime.setDescription('The time client has been associated.')
ruckusZDWLANStaRxError = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaRxError.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaRxError.setDescription('Received error packets.')
# ruckusZDWLANStaTxSuccess's setDescription call falls outside this chunk.
ruckusZDWLANStaTxSuccess = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaTxSuccess.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaTxSuccess.setDescription('Successful transmitted packets.')
ruckusZDWLANSta11bgReassoc = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANSta11bgReassoc.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANSta11bgReassoc.setDescription('Reassociate count on B/G RF.')
ruckusZDWLANStaAssocTimestamp = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 19), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaAssocTimestamp.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaAssocTimestamp.setDescription('The timestamp client associated.')
ruckusZDWLANStaRetryBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaRetryBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaRetryBytes.setDescription('Transmitted retry bytes.')
ruckusZDWLANStaSNR = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 21), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaSNR.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaSNR.setDescription("Station's Signal to Noise Ratio.")
ruckusZDWLANStaRxDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaRxDrop.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaRxDrop.setDescription('Drop transmitted packets.')
ruckusZDWLANStaTxDrop = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaTxDrop.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaTxDrop.setDescription('Drop received packets.')
ruckusZDWLANStaTxError = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaTxError.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaTxError.setDescription('Transmitted error packets.')
ruckusZDWLANStaVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 30), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaVlanID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaVlanID.setDescription('VLAN ID of the station. 1 for not specified.')
ruckusZDWLANStaAuthMode = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 80), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaAuthMode.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaAuthMode.setDescription('Authentication mode of authenticated terminal.')
ruckusZDWLANStaSignalStrength = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 3, 1, 1, 81), Integer32()).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANStaSignalStrength.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANStaSignalStrength.setDescription('Signal strength')
# --- ZD WLAN rogue-device table ---------------------------------------------
# Subtree 1.3.6.1.4.1.25053.1.2.2.1.1.4; one row per detected rogue device,
# indexed by ifIndex (imported from IF-MIB).  All columns are read-only.
ruckusZDWLANRogueInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4))
ruckusZDWLANRogueTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1), )
if mibBuilder.loadTexts: ruckusZDWLANRogueTable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueTable.setDescription('ZD WLAN rogue device table.')
ruckusZDWLANRogueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: ruckusZDWLANRogueEntry.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueEntry.setDescription('Specifies each ZD WLAN rogue device entry.')
ruckusZDWLANRogueMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRogueMacAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueMacAddr.setDescription("Rogue device's MAC Address.")
ruckusZDWLANRogueSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1, 1, 2), RuckusSSID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRogueSSID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueSSID.setDescription('SSID.')
# NOTE: this enum differs from the station table's radio-type enum
# (11bg/11a/11ng/11na here, with different integer values) -- they are not
# interchangeable.
ruckusZDWLANRogueRadioType = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("radio11bg", 0), ("radio11a", 1), ("radio11ng", 2), ("radio11na", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRogueRadioType.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueRadioType.setDescription('Radio type.')
ruckusZDWLANRogueChannel = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1, 1, 4), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRogueChannel.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueChannel.setDescription('Radio channel.')
ruckusZDWLANRogueRSSI = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRogueRSSI.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueRSSI.setDescription('RSSI.')
ruckusZDWLANRogueType = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("ap", 0), ("ad-hoc", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRogueType.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueType.setDescription('The type of the rogue device.')
ruckusZDWLANRogueEncrypted = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("open", 0), ("encrypted", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRogueEncrypted.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueEncrypted.setDescription('Encryption mode.')
# Sub-id jumps from 7 to 11 -- reserved column numbers in the vendor MIB.
ruckusZDWLANRogueSignalStrength = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 4, 1, 1, 11), Integer32()).setUnits('dBm').setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANRogueSignalStrength.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANRogueSignalStrength.setDescription('Signal strength')
# --- ZD WLAN per-WLAN VAP (virtual AP / BSS) table ---------------------------
# Subtree 1.3.6.1.4.1.25053.1.2.2.1.1.2.3; one row per VAP, indexed by the
# VAP's BSSID (a MAC address).  Counters split traffic by LAN vs WLAN side.
# Descriptions are verbatim from the vendor MIB, typos included
# ("Receiveed") -- keep them as generated.
ruckusZDWLANVapTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3), )
if mibBuilder.loadTexts: ruckusZDWLANVapTable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapTable.setDescription('ZD WLAN AP table per each WLAN.')
ruckusZDWLANVapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1), ).setIndexNames((0, "RUCKUS-ZD-WLAN-MIB", "ruckusZDWLANVapBSSID"))
if mibBuilder.loadTexts: ruckusZDWLANVapEntry.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapEntry.setDescription('Specifies each ZD WLAN vap entry.')
ruckusZDWLANVapBSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapBSSID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapBSSID.setDescription("The VAP's BSSID ,it is a MAC Address.")
ruckusZDWLANVapPAPAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 2), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapPAPAddr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapPAPAddr.setDescription("The MAC address of the Parent's AP.")
ruckusZDWLANVapSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 3), RuckusSSID()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapSSID.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapSSID.setDescription('SSID of VAP.')
# LAN-side byte counters (64-bit).
ruckusZDWLANVapLanRxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapLanRxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapLanRxBytes.setDescription('Received bytes on LAN in VAP .')
ruckusZDWLANVapLanTxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapLanTxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapLanTxBytes.setDescription('Transmitted bytes on LAN in VAP.')
# WLAN-side byte and packet counters (64-bit).
ruckusZDWLANVapWlanRxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxBytes.setDescription('Received bytes on WLAN in VAP .')
ruckusZDWLANVapWlanTxBytes = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxBytes.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxBytes.setDescription('Transmitted bytes on WLAN in VAP.')
ruckusZDWLANVapWlanRxErrorPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxErrorPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxErrorPkt.setDescription('Receiveed error packet on WLAN in VAP.')
ruckusZDWLANVapWlanRxUnicastPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxUnicastPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxUnicastPkt.setDescription('Receiveed unicast packet on WLAN in VAP.')
ruckusZDWLANVapWlanTxUnicastPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxUnicastPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxUnicastPkt.setDescription('Transmitted unicast packet on WLAN in VAP.')
ruckusZDWLANVapWlanRxPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxPkt.setDescription('Received packet on WLAN in VAP.')
ruckusZDWLANVapWlanRxDropPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxDropPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanRxDropPkt.setDescription('Received dropped packet on WLAN in VAP.')
ruckusZDWLANVapWlanTxErrPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 16), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxErrPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxErrPkt.setDescription('Transmitted error packet on WLAN in VAP.')
ruckusZDWLANVapWlanTxPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 17), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxPkt.setDescription('Transmitted packet on WLAN in VAP.')
ruckusZDWLANVapWlanTxDropPkt = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 3, 1, 18), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxDropPkt.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANVapWlanTxDropPkt.setDescription('Transmitted dropped packet on WLAN in VAP.')
# --- ZD WLAN per-AP interface table ------------------------------------------
# Subtree 1.3.6.1.4.1.25053.1.2.2.1.1.2.4; one row per (AP MAC, ifIndex).
# The columns mirror the classic IF-MIB ifTable (ifDescr, ifType, ifMtu,
# in/out octet and packet counters, ...), scoped to a specific AP.
ruckusZDWLANIfTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4), )
if mibBuilder.loadTexts: ruckusZDWLANIfTable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANIfTable.setDescription('ZD WLAN AP Interface table per each AP.')
ruckusZDWLANIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1), ).setIndexNames((0, "RUCKUS-ZD-WLAN-MIB", "ruckusZDWLANAPMac"), (0, "RUCKUS-ZD-WLAN-MIB", "ruckusZDWLANAPIfIndex"))
if mibBuilder.loadTexts: ruckusZDWLANIfEntry.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANIfEntry.setDescription('Specifies each ZD WLAN vap entry.')
# Index columns.
ruckusZDWLANAPMac = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMac.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMac.setDescription("The AP's Mac Address.")
ruckusZDWLANAPIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 2), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfIndex.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfIndex.setDescription('The index of Interface in AP .')
# Descriptive attributes.
ruckusZDWLANAPIfDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfDescr.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfDescr.setDescription('The Description information for this interface.')
ruckusZDWLANAPIfType = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 4), IANAifType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfType.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfType.setDescription('The type of interface.')
ruckusZDWLANAPIfMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfMtu.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfMtu.setDescription('The size of the largest packet which can be sent/received.')
# Speed is a DisplayString here (not Gauge32 as in IF-MIB).
ruckusZDWLANAPIfSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 6), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfSpeed.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfSpeed.setDescription("An estimate of the interface's current bandwidth in bits.")
ruckusZDWLANAPIfPhysAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfPhysAddress.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfPhysAddress.setDescription("The interface's mac address.")
# Admin/oper status enums follow IF-MIB value conventions.
ruckusZDWLANAPIfAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfAdminStatus.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfAdminStatus.setDescription('The desired state of the interface: up(1), down(2) ,testing(3) .')
ruckusZDWLANAPIfOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("up", 1), ("down", 2), ("testing", 3), ("unknown", 4), ("dormant", 5), ("notPresent", 6), ("lowerLayerDown", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfOperStatus.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfOperStatus.setDescription('The current operational state of the interface: up(1), down(2), testing(3), unknown(4), dormant(5) , notPresent(6), lowerLayerDown(7).')
# Traffic counters (32-bit, as in the original ifTable).
ruckusZDWLANAPIfInOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfInOctets.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfInOctets.setDescription('The total number of octets received on the interface.')
ruckusZDWLANAPIfInUcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfInUcastPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfInUcastPkts.setDescription('The total number of unicast packets received on the interface.')
ruckusZDWLANAPIfInNUcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfInNUcastPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfInNUcastPkts.setDescription('The total number of multicast and broadcast packets received on the interface.')
ruckusZDWLANAPIfInDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfInDiscards.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfInDiscards.setDescription('The number of inbound packets which were chosen to be discarded even though no errors had been detected .')
ruckusZDWLANAPIfInErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfInErrors.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfInErrors.setDescription('The number of inbound packets that contained errors.')
ruckusZDWLANAPIfInUnknownProtos = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfInUnknownProtos.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfInUnknownProtos.setDescription('The number of packets received via the interface which were discarded because of an unknown or unsupported protocol.')
ruckusZDWLANAPIfOutOctets = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutOctets.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutOctets.setDescription('The total number of octets transmitted out of the interface.')
ruckusZDWLANAPIfOutUcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutUcastPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutUcastPkts.setDescription('The total number of unicast packets transmitted out of the interface.')
ruckusZDWLANAPIfOutNUcastPkts = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutNUcastPkts.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutNUcastPkts.setDescription('The total number of multicast and broadcast packets transmitted out of the interface.')
ruckusZDWLANAPIfOutDiscards = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutDiscards.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutDiscards.setDescription('The number of outbound packets which were chosen to be discarded even though no errors had been detected.')
ruckusZDWLANAPIfOutErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutErrors.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfOutErrors.setDescription('The number of outbound packets that could not be transmitted because of errors.')
ruckusZDWLANAPIfName = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 21), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfName.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfName.setDescription('The name of interface.')
ruckusZDWLANAPIfNameDefined = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 4, 1, 22), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPIfNameDefined.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPIfNameDefined.setDescription('The alias of interface.')
# --- ZD WLAN AP Ethernet-port status table -----------------------------------
# Subtree 1.3.6.1.4.1.25053.1.2.2.1.1.2.8; one row per (AP MAC, Ethernet
# port id).  Reports 802.1X role, logical/physical link state, speed and
# duplex for each wired port on an AP.  All columns are read-only.
ruckusZDWLANAPEthStatusTable = MibTable((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8), )
if mibBuilder.loadTexts: ruckusZDWLANAPEthStatusTable.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthStatusTable.setDescription('ZD WLAN AP Ethernet Interface status table per each AP.')
ruckusZDWLANAPEthStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1), ).setIndexNames((0, "RUCKUS-ZD-WLAN-MIB", "ruckusZDWLANAPMacAddress"), (0, "RUCKUS-ZD-WLAN-MIB", "ruckusZDWLANAPEthPortId"))
if mibBuilder.loadTexts: ruckusZDWLANAPEthStatusEntry.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthStatusEntry.setDescription('Specifies each ZD WLAN AP ethernet interface status entry.')
# Index columns: AP MAC + port id.  Note this table uses a distinct
# index object (ruckusZDWLANAPMacAddress) from the interface table's
# ruckusZDWLANAPMac.
ruckusZDWLANAPMacAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1, 1), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPMacAddress.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPMacAddress.setDescription("The AP's Mac Address.")
ruckusZDWLANAPEthPortId = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPEthPortId.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthPortId.setDescription('The ap ethernet port id.')
# Sub-id jumps from 2 to 5 -- reserved column numbers in the vendor MIB.
ruckusZDWLANAPEthIfname = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1, 5), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPEthIfname.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthIfname.setDescription('The name of interface.')
# 802.1X role of the port: authenticator, supplicant, or none.
ruckusZDWLANAPEthDot1xStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("auth", 1), ("supp", 2), ("none", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPEthDot1xStatus.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthDot1xStatus.setDescription('The ap ethernet port Dot1xStatus.')
ruckusZDWLANAPEthLogicalStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPEthLogicalStatus.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthLogicalStatus.setDescription('The ap ethernet port logical status.')
ruckusZDWLANAPEthPhyStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("up", 1), ("down", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPEthPhyStatus.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthPhyStatus.setDescription('The ap ethernet port physical status.')
ruckusZDWLANAPEthPhyIfSpeed = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPEthPhyIfSpeed.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthPhyIfSpeed.setDescription("The ap ethernet interface's current bandwidth in Mbits,.")
# Despite the name, this reports duplex mode (full/half), not link state.
ruckusZDWLANAPEthPhyLinkStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("full", 1), ("half", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPEthPhyLinkStatus.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthPhyLinkStatus.setDescription('The ap ethernet port physical link status.')
ruckusZDWLANAPEthLabel = MibTableColumn((1, 3, 6, 1, 4, 1, 25053, 1, 2, 2, 1, 1, 2, 8, 1, 11), OctetString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ruckusZDWLANAPEthLabel.setStatus('current')
if mibBuilder.loadTexts: ruckusZDWLANAPEthLabel.setDescription('The ap ethernet port label.')
mibBuilder.exportSymbols("RUCKUS-ZD-WLAN-MIB", ruckusZDWLANAPRadioStatsTxPkts=ruckusZDWLANAPRadioStatsTxPkts, ruckusZDWLANTxBytes=ruckusZDWLANTxBytes, ruckusZDWLANAPTxFrameDropped=ruckusZDWLANAPTxFrameDropped, ruckusZDWLANAPLANStatsRXPkt=ruckusZDWLANAPLANStatsRXPkt, ruckusZDWLANStaBSSID=ruckusZDWLANStaBSSID, ruckusZDWLANAPNetmask=ruckusZDWLANAPNetmask, ruckusZDWLANStaTable=ruckusZDWLANStaTable, ruckusZDWLANAssocFail=ruckusZDWLANAssocFail, ruckusZDWLANIfTable=ruckusZDWLANIfTable, ruckusZDWLANAPRadioStatsDiassocLeave=ruckusZDWLANAPRadioStatsDiassocLeave, ruckusZDWLANAPRadioStatsNumSta=ruckusZDWLANAPRadioStatsNumSta, ruckusZDWLANStaSignalStrength=ruckusZDWLANStaSignalStrength, ruckusZDWLANAPMeshUpPortCntUpdown=ruckusZDWLANAPMeshUpPortCntUpdown, ruckusZDWLANAP11bCapable=ruckusZDWLANAP11bCapable, ruckusZDWLANAPUptime=ruckusZDWLANAPUptime, ruckusZDWLANDescription=ruckusZDWLANDescription, ruckusZDWLANSta11bgReassoc=ruckusZDWLANSta11bgReassoc, ruckusZDWLANSSIDBcastDisable=ruckusZDWLANSSIDBcastDisable, ruckusZDWLANAPLastConfSyncTime=ruckusZDWLANAPLastConfSyncTime, ruckusZDWLANAPMacAddr=ruckusZDWLANAPMacAddr, ruckusZDWLANAPStaRxBytes=ruckusZDWLANAPStaRxBytes, ruckusZDWLANAPRadioStatsTable=ruckusZDWLANAPRadioStatsTable, ruckusZDWLANAPRadioStatsAuthResp=ruckusZDWLANAPRadioStatsAuthResp, ruckusZDWLANDiassocCapacity=ruckusZDWLANDiassocCapacity, ruckusZDWLANAPRadioStatsRxMulticast=ruckusZDWLANAPRadioStatsRxMulticast, ruckusZDWLANStaEntry=ruckusZDWLANStaEntry, ruckusZDWLANNAME=ruckusZDWLANNAME, ruckusZDWLANStaRxPkts=ruckusZDWLANStaRxPkts, ruckusZDWLANStaRxBytes=ruckusZDWLANStaRxBytes, ruckusZDWLANAPRadioStatsRxPkts=ruckusZDWLANAPRadioStatsRxPkts, ruckusZDWLANAPModel=ruckusZDWLANAPModel, ruckusZDWLANAPLANStatsRXByte=ruckusZDWLANAPLANStatsRXByte, ruckusZDWLANAPEthLogicalStatus=ruckusZDWLANAPEthLogicalStatus, ruckusZDWLANAPRadioStatsAntennaGain=ruckusZDWLANAPRadioStatsAntennaGain, ruckusZDWLANStaRxDrop=ruckusZDWLANStaRxDrop, ruckusZDWLANStaTxError=ruckusZDWLANStaTxError, 
ruckusZDWLANAPEthDot1xStatus=ruckusZDWLANAPEthDot1xStatus, ruckusZDWLANAuthentication=ruckusZDWLANAuthentication, ruckusZDWLANAPRadioStatsAssocReq=ruckusZDWLANAPRadioStatsAssocReq, ruckusZDWLANRogueRadioType=ruckusZDWLANRogueRadioType, ruckusZDWLANAPIfIndex=ruckusZDWLANAPIfIndex, ruckusZDWLANAPMeshHops=ruckusZDWLANAPMeshHops, ruckusZDWLANMIB=ruckusZDWLANMIB, ruckusZDWLANAPRadioStatsMaxSta=ruckusZDWLANAPRadioStatsMaxSta, ruckusZDWLANVapEntry=ruckusZDWLANVapEntry, ruckusZDWLANAPMemTotal=ruckusZDWLANAPMemTotal, ruckusZDWLANAPRadioStatsFrameRetryRate=ruckusZDWLANAPRadioStatsFrameRetryRate, ruckusZDWLANIfEntry=ruckusZDWLANIfEntry, ruckusZDWLANAPCoverageTech=ruckusZDWLANAPCoverageTech, ruckusZDWLANAPRadioStatsTxMulticast=ruckusZDWLANAPRadioStatsTxMulticast, ruckusZDWLANRogueEntry=ruckusZDWLANRogueEntry, ruckusZDWLANAPIfOutDiscards=ruckusZDWLANAPIfOutDiscards, ruckusZDWLANAPRadioStatsRadioType=ruckusZDWLANAPRadioStatsRadioType, ruckusZDWLANAPEthLabel=ruckusZDWLANAPEthLabel, ruckusZDWLANStaUser=ruckusZDWLANStaUser, ruckusZDWLANAPUpgrade=ruckusZDWLANAPUpgrade, ruckusZDWLANAPIfAdminStatus=ruckusZDWLANAPIfAdminStatus, ruckusZDWLANAPIfMtu=ruckusZDWLANAPIfMtu, ruckusZDWLANVapBSSID=ruckusZDWLANVapBSSID, ruckusZDWLANAPIfOutOctets=ruckusZDWLANAPIfOutOctets, ruckusZDWLANAPMac=ruckusZDWLANAPMac, ruckusZDWLANAPMeshDownPortCntUpdown=ruckusZDWLANAPMeshDownPortCntUpdown, ruckusZDWLANAPLANStatsTXPkt=ruckusZDWLANAPLANStatsTXPkt, ruckusZDWLANDiassocAbnormal=ruckusZDWLANDiassocAbnormal, ruckusZDWLANIsGuest=ruckusZDWLANIsGuest, ruckusZDWLANAPDNS1=ruckusZDWLANAPDNS1, ruckusZDWLANAP11gCapable=ruckusZDWLANAP11gCapable, ruckusZDWLANAPLastUpgradeTime=ruckusZDWLANAPLastUpgradeTime, ruckusZDWLANRxByteRate=ruckusZDWLANRxByteRate, ruckusZDWLANRogueEncrypted=ruckusZDWLANRogueEncrypted, ruckusZDWLANDiassocMisc=ruckusZDWLANDiassocMisc, ruckusZDWLANAPRadioStatsRadioIndex=ruckusZDWLANAPRadioStatsRadioIndex, ruckusZDWLANRxPkts=ruckusZDWLANRxPkts, ruckusZDWLANVapWlanTxBytes=ruckusZDWLANVapWlanTxBytes, 
ruckusZDWLANAPNumRadios=ruckusZDWLANAPNumRadios, ruckusZDWLANAPEthStatusEntry=ruckusZDWLANAPEthStatusEntry, ruckusZDWLANStaRetries=ruckusZDWLANStaRetries, ruckusZDWLANAPStaTxBytes=ruckusZDWLANAPStaTxBytes, ruckusZDWLANRxDataFrameOnLan=ruckusZDWLANRxDataFrameOnLan, ruckusZDWLANAPLANStatsRXPKTUcast=ruckusZDWLANAPLANStatsRXPKTUcast, ruckusZDWLANVapWlanTxPkt=ruckusZDWLANVapWlanTxPkt, ruckusZDWLANAPRadioStatsAvgStaRSSI=ruckusZDWLANAPRadioStatsAvgStaRSSI, ruckusZDWLANAPRadioStatsTxRetries=ruckusZDWLANAPRadioStatsTxRetries, ruckusZDWLANAPRadioStatsAssocResp=ruckusZDWLANAPRadioStatsAssocResp, ruckusZDWLANAPRadioStatsAssocFail=ruckusZDWLANAPRadioStatsAssocFail, ruckusZDWLANRateLimitingUp=ruckusZDWLANRateLimitingUp, ruckusZDWLANTunnelWLAN=ruckusZDWLANTunnelWLAN, ruckusZDWLANAPEthStatusTable=ruckusZDWLANAPEthStatusTable, ruckusZDWLANStaAPMacAddr=ruckusZDWLANStaAPMacAddr, ruckusZDWLANAPRadioStatsTxPower=ruckusZDWLANAPRadioStatsTxPower, ruckusZDWLANDiassocLeave=ruckusZDWLANDiassocLeave, ruckusZDWLANAPInfo=ruckusZDWLANAPInfo, ruckusZDWLANAPIfDescr=ruckusZDWLANAPIfDescr, ruckusZDWLANAPLANStatsDropped=ruckusZDWLANAPLANStatsDropped, ruckusZDWLANAPRadioStatsBeaconPeriod=ruckusZDWLANAPRadioStatsBeaconPeriod, ruckusZDWLANDownDropFrame=ruckusZDWLANDownDropFrame, ruckusZDWLANVapWlanRxDropPkt=ruckusZDWLANVapWlanRxDropPkt, ruckusZDWLANAPRadioStatsReassocReq=ruckusZDWLANAPRadioStatsReassocReq, ruckusZDWLANAPIfInNUcastPkts=ruckusZDWLANAPIfInNUcastPkts, ruckusZDWLANAPLANStatsTXByte=ruckusZDWLANAPLANStatsTXByte, ruckusZDWLANAPEthPhyStatus=ruckusZDWLANAPEthPhyStatus, ruckusZDWLANAPCPUUtil=ruckusZDWLANAPCPUUtil, ruckusZDWLANStaInfo=ruckusZDWLANStaInfo, ruckusZDWLANAssocResp=ruckusZDWLANAssocResp, ruckusZDWLANAPStatus=ruckusZDWLANAPStatus, ruckusZDWLANAPRadioStatsAPMacAddr=ruckusZDWLANAPRadioStatsAPMacAddr, ruckusZDWLANAPNumSta=ruckusZDWLANAPNumSta, ruckusZDWLANAPRadioStatsPowerMgmt=ruckusZDWLANAPRadioStatsPowerMgmt, ruckusZDWLANAPIfSpeed=ruckusZDWLANAPIfSpeed, 
ruckusZDWLANUpDropFrame=ruckusZDWLANUpDropFrame, ruckusZDWLANAPEntry=ruckusZDWLANAPEntry, ruckusZDWLANStaSSID=ruckusZDWLANStaSSID, ruckusZDWLANAPRadioStatsAuthFail=ruckusZDWLANAPRadioStatsAuthFail, ruckusZDWLANAPRadioStatsTxBytes=ruckusZDWLANAPRadioStatsTxBytes, ruckusZDWLANAPRadioStatsMeshEnable=ruckusZDWLANAPRadioStatsMeshEnable, ruckusZDWLANAPIfOperStatus=ruckusZDWLANAPIfOperStatus, ruckusZDWLANAPIfOutNUcastPkts=ruckusZDWLANAPIfOutNUcastPkts, ruckusZDWLANAPEthPortId=ruckusZDWLANAPEthPortId, ruckusZDWLANRogueMacAddr=ruckusZDWLANRogueMacAddr, ruckusZDWLANStaChannel=ruckusZDWLANStaChannel, ruckusZDWLANDownRetxFrame=ruckusZDWLANDownRetxFrame, ruckusZDWLANDownTotalFrame=ruckusZDWLANDownTotalFrame, ruckusZDWLANStaMacAddr=ruckusZDWLANStaMacAddr, ruckusZDWLANUpTotalFrame=ruckusZDWLANUpTotalFrame, ruckusZDWLANTxPkts=ruckusZDWLANTxPkts, ruckusZDWLANRogueType=ruckusZDWLANRogueType, ruckusZDWLANAPIfOutUcastPkts=ruckusZDWLANAPIfOutUcastPkts, ruckusZDWLANRxBytes=ruckusZDWLANRxBytes, ruckusZDWLANAPRadioStatsFragThreshold=ruckusZDWLANAPRadioStatsFragThreshold, ruckusZDWLANStaVlanID=ruckusZDWLANStaVlanID, ruckusZDWLANAPFirstJoinTime=ruckusZDWLANAPFirstJoinTime, ruckusZDWLANAPHWversion=ruckusZDWLANAPHWversion, ruckusZDWLANAPIPAddr=ruckusZDWLANAPIPAddr, ruckusZDWLANStaRxError=ruckusZDWLANStaRxError, ruckusZDWLANAPGateway=ruckusZDWLANAPGateway, ruckusZDWLANAPIfName=ruckusZDWLANAPIfName, ruckusZDWLANRogueSSID=ruckusZDWLANRogueSSID, ruckusZDWLANAPNumVAP=ruckusZDWLANAPNumVAP, ruckusZDWLANEncryption=ruckusZDWLANEncryption, ruckusZDWLANVapSSID=ruckusZDWLANVapSSID, ruckusZDWLANAPSerialNumber=ruckusZDWLANAPSerialNumber, ruckusZDWLANAPRadioStatsAssocDenied=ruckusZDWLANAPRadioStatsAssocDenied, ruckusZDWLANStaRadioType=ruckusZDWLANStaRadioType, ruckusZDWLANAPIfInDiscards=ruckusZDWLANAPIfInDiscards, ruckusZDWLANDownTotalErrFrame=ruckusZDWLANDownTotalErrFrame, ruckusZDWLANAPEthStateChange=ruckusZDWLANAPEthStateChange, ruckusZDWLANAPRadioStatsEntry=ruckusZDWLANAPRadioStatsEntry, 
ruckusZDWLANEntry=ruckusZDWLANEntry, ruckusZDWLANStaSNR=ruckusZDWLANStaSNR, ruckusZDWLANAPRadioStatsDiassocCapacity=ruckusZDWLANAPRadioStatsDiassocCapacity, ruckusZDWLANAssocTotal=ruckusZDWLANAssocTotal, ruckusZDWLANAPSyncConf=ruckusZDWLANAPSyncConf, ruckusZDWLANVapPAPAddr=ruckusZDWLANVapPAPAddr, ruckusZDWLANStaAvgRSSI=ruckusZDWLANStaAvgRSSI, ruckusZDWLANAuthTotal=ruckusZDWLANAuthTotal, ruckusZDWLANAPRadioStatsTxFail=ruckusZDWLANAPRadioStatsTxFail, ruckusZDWLANAPEthPhyIfSpeed=ruckusZDWLANAPEthPhyIfSpeed, ruckusZDWLANRogueInfo=ruckusZDWLANRogueInfo, ruckusZDWLANAPRadioStatsTotalAssocTime=ruckusZDWLANAPRadioStatsTotalAssocTime, ruckusZDWLANAPLANStatsTXByteRate=ruckusZDWLANAPLANStatsTXByteRate, ruckusZDWLANAPRadioStatsDiassocAbnormal=ruckusZDWLANAPRadioStatsDiassocAbnormal, ruckusZDWLANAPLastBootTime=ruckusZDWLANAPLastBootTime, ruckusZDWLANVapLanTxBytes=ruckusZDWLANVapLanTxBytes, ruckusZDWLANAPRadioStatsRxSignalFrm=ruckusZDWLANAPRadioStatsRxSignalFrm, ruckusZDWLANAPRadioStatsTotalSignalFrm=ruckusZDWLANAPRadioStatsTotalSignalFrm, ruckusZDWLANVapWlanRxErrorPkt=ruckusZDWLANVapWlanRxErrorPkt, ruckusZDWLANAPIfPhysAddress=ruckusZDWLANAPIfPhysAddress, ruckusZDWLANAssocDenied=ruckusZDWLANAssocDenied, ruckusZDWLANStaAssocTimestamp=ruckusZDWLANStaAssocTimestamp, ruckusZDWLANAuthResp=ruckusZDWLANAuthResp, ruckusZDWLANAPLANStatsRXPKTMcast=ruckusZDWLANAPLANStatsRXPKTMcast, ruckusZDWLANReassocResp=ruckusZDWLANReassocResp, ruckusZDWLANVlanID=ruckusZDWLANVlanID, ruckusZDWLANAPDNS2=ruckusZDWLANAPDNS2, ruckusZDWLANAuthFail=ruckusZDWLANAuthFail, ruckusZDWLANAPRadioStatsChannel=ruckusZDWLANAPRadioStatsChannel, ruckusZDWLANNumVAP=ruckusZDWLANNumVAP, ruckusZDWLANTxByteOnLan=ruckusZDWLANTxByteOnLan, ruckusZDWLANStaIPAddr=ruckusZDWLANStaIPAddr, ruckusZDWLANStaTxPkts=ruckusZDWLANStaTxPkts, ruckusZDWLANStaRetryBytes=ruckusZDWLANStaRetryBytes, ruckusZDWLANAPIfNameDefined=ruckusZDWLANAPIfNameDefined, ruckusZDWLANRogueRSSI=ruckusZDWLANRogueRSSI, ruckusZDWLANRxByteOnLan=ruckusZDWLANRxByteOnLan, 
ruckusZDWLANNumSta=ruckusZDWLANNumSta, ruckusZDWLANAPRadioStatsResourceUtil=ruckusZDWLANAPRadioStatsResourceUtil, ruckusZDWLANVapWlanRxPkt=ruckusZDWLANVapWlanRxPkt, ruckusZDWLANAPRadioStatsMonitoredTime=ruckusZDWLANAPRadioStatsMonitoredTime, ruckusZDWLANAPMeshEnable=ruckusZDWLANAPMeshEnable, PYSNMP_MODULE_ID=ruckusZDWLANMIB, ruckusZDWLANAPRadioStatsAssocSuccess=ruckusZDWLANAPRadioStatsAssocSuccess, ruckusZDWLANAPRadioStatsNumVAP=ruckusZDWLANAPRadioStatsNumVAP, ruckusZDWLANAPLANStatsRXPKTBcast=ruckusZDWLANAPLANStatsRXPKTBcast, ruckusZDWLANAPRadioStatsTxSignalFrm=ruckusZDWLANAPRadioStatsTxSignalFrm, ruckusZDWLANSSID=ruckusZDWLANSSID, ruckusZDWLANAPFWSize=ruckusZDWLANAPFWSize, ruckusZDWLANAuthSuccessTotal=ruckusZDWLANAuthSuccessTotal, ruckusZDWLANStaTxSuccess=ruckusZDWLANStaTxSuccess, ruckusZDWLANVapWlanTxErrPkt=ruckusZDWLANVapWlanTxErrPkt, ruckusZDWLANStaAssocTime=ruckusZDWLANStaAssocTime, ruckusZDWLANAPTotalUser=ruckusZDWLANAPTotalUser, ruckusZDWLANAPLANStatsTXPKTBcast=ruckusZDWLANAPLANStatsTXPKTBcast, ruckusZDWLANVapWlanRxBytes=ruckusZDWLANVapWlanRxBytes, ruckusZDWLANAPEthIfname=ruckusZDWLANAPEthIfname, ruckusZDWLANAPMultiModeAccessStatus=ruckusZDWLANAPMultiModeAccessStatus, ruckusZDWLANAPRadioStatsFrameErrorRate=ruckusZDWLANAPRadioStatsFrameErrorRate, ruckusZDWLANRogueSignalStrength=ruckusZDWLANRogueSignalStrength, ruckusZDWLANAPDescription=ruckusZDWLANAPDescription, ruckusZDWLANAPIfInUcastPkts=ruckusZDWLANAPIfInUcastPkts, ruckusZDWLANTxByteRate=ruckusZDWLANTxByteRate, ruckusZDWLANAPRadioStatsAuthSuccess=ruckusZDWLANAPRadioStatsAuthSuccess, ruckusZDWLANAPRadioStatsDiassocMisc=ruckusZDWLANAPRadioStatsDiassocMisc, ruckusZDWLANAPLANStatsRXPktErr=ruckusZDWLANAPLANStatsRXPktErr, ruckusZDWLANRateLimitingDown=ruckusZDWLANRateLimitingDown, ruckusZDWLANAPTable=ruckusZDWLANAPTable, ruckusZDWLANAPConnectionMode=ruckusZDWLANAPConnectionMode, ruckusZDWLANAPRadioStatsAuthReq=ruckusZDWLANAPRadioStatsAuthReq, 
ruckusZDWLANAPMultipleVlanCapability=ruckusZDWLANAPMultipleVlanCapability, ruckusZDWLANObjects=ruckusZDWLANObjects, ruckusZDWLANAPRadioStatsRxBytes=ruckusZDWLANAPRadioStatsRxBytes, ruckusZDWLANAPLANStatsRXByteRate=ruckusZDWLANAPLANStatsRXByteRate, ruckusZDWLANStaTxBytes=ruckusZDWLANStaTxBytes, ruckusZDWLANUpRetxFrame=ruckusZDWLANUpRetxFrame, ruckusZDWLANAPFWAvail=ruckusZDWLANAPFWAvail, ruckusZDWLANAPMeshType=ruckusZDWLANAPMeshType, ruckusZDWLANVapLanRxBytes=ruckusZDWLANVapLanRxBytes, ruckusZDWLANAPIfInOctets=ruckusZDWLANAPIfInOctets, ruckusZDWLANStaAuthMode=ruckusZDWLANStaAuthMode, ruckusZDWLANAPMacAddress=ruckusZDWLANAPMacAddress, ruckusZDWLANVapWlanRxUnicastPkt=ruckusZDWLANVapWlanRxUnicastPkt, ruckusZDWLANStaTxDrop=ruckusZDWLANStaTxDrop, ruckusZDWLANVapWlanTxDropPkt=ruckusZDWLANVapWlanTxDropPkt, ruckusZDWLANAPSWversion=ruckusZDWLANAPSWversion, ruckusZDWLANAPNumRogues=ruckusZDWLANAPNumRogues, ruckusZDWLANAPIfInErrors=ruckusZDWLANAPIfInErrors, ruckusZDWLANAPRadioStatsRTSThreshold=ruckusZDWLANAPRadioStatsRTSThreshold, ruckusZDWLANRogueChannel=ruckusZDWLANRogueChannel, ruckusZDWLANAPRadioStatsReassocResp=ruckusZDWLANAPRadioStatsReassocResp, ruckusZDWLANInfo=ruckusZDWLANInfo, ruckusZDWLANTable=ruckusZDWLANTable, ruckusZDWLANAssocSuccess=ruckusZDWLANAssocSuccess, ruckusZDWLANAPEthPhyLinkStatus=ruckusZDWLANAPEthPhyLinkStatus, ruckusZDWLANRogueTable=ruckusZDWLANRogueTable, ruckusZDWLANAPIfOutErrors=ruckusZDWLANAPIfOutErrors, ruckusZDWLANAPLANStatsRXPKTSucc=ruckusZDWLANAPLANStatsRXPKTSucc, ruckusZDWLANVapTable=ruckusZDWLANVapTable, ruckusZDWLANAPIfType=ruckusZDWLANAPIfType, ruckusZDWLANReassocReq=ruckusZDWLANReassocReq, ruckusZDWLANAPMemUtil=ruckusZDWLANAPMemUtil, ruckusZDWLANAPIfInUnknownProtos=ruckusZDWLANAPIfInUnknownProtos, ruckusZDWLANAPTxFrameError=ruckusZDWLANAPTxFrameError, ruckusZDWLANVapWlanTxUnicastPkt=ruckusZDWLANVapWlanTxUnicastPkt, ruckusZDWLANAPLANStatsTXPKTUcast=ruckusZDWLANAPLANStatsTXPKTUcast, 
ruckusZDWLANAPLANStatsTXPKTMcast=ruckusZDWLANAPLANStatsTXPKTMcast)
# Register this module's name with the MIB builder so other compiled MIB
# modules can import symbols from "RUCKUS-ZD-WLAN-MIB". The empty trailing
# argument list appears to be emitted by the pysnmp MIB compiler — TODO confirm
# against the generator; the bulk export happens in the exportSymbols call above.
mibBuilder.exportSymbols("RUCKUS-ZD-WLAN-MIB", )
| [
2,
198,
2,
9485,
15571,
7378,
337,
9865,
8265,
371,
16696,
2937,
12,
57,
35,
12,
54,
25697,
12,
8895,
33,
357,
4023,
1378,
16184,
76,
489,
8937,
13,
785,
14,
79,
893,
11632,
8,
198,
2,
7054,
45,
13,
16,
2723,
2393,
1378,
14,
1... | 2.554804 | 37,999 |
""" General database functions.
Should work with DB API 2.0 cursors, tested with psycopg2.
https://www.python.org/dev/peps/pep-0249/
"""
def as_dicts(cursor):
    """ Return the entire result-set of *cursor* as a list of dictionaries.

    Column names are taken from ``cursor.description`` (DB API 2.0: the
    first item of each column 7-tuple). The whole result-set is
    materialized in memory; for large result-sets prefer an incremental
    row-by-row generator instead.
    """
    fields = [column[0] for column in cursor.description]
    # Build each row-dict directly; zip pairs column names with row values.
    return [dict(zip(fields, row)) for row in cursor.fetchall()]
def dict_iter(cursor):
    """ Lazily yield each row of *cursor* as a dictionary.

    Column names come from ``cursor.description`` (DB API 2.0). Rows are
    fetched one at a time, so this works for result-sets that do not fit
    into memory.
    """
    fields = [column[0] for column in cursor.description]
    while True:
        row = cursor.fetchone()
        if not row:
            # fetchone() returns None once the result-set is exhausted.
            return
        yield dict(zip(fields, row))
def insert_record(cursor, table, record):
    """ Insert *record* (a dict of column -> value) into *table*.

    Values are bound through DB API "pyformat" placeholders, so the
    driver handles value escaping. The caller is responsible for calling
    commit() on the connection afterwards.

    NOTE(review): *table* and the record's keys are interpolated directly
    into the SQL text — they must come from trusted code, never from
    user input.
    """
    columns = list(record)
    column_list = ', '.join(columns)
    placeholder_list = ', '.join('%%(%s)s' % name for name in columns)
    statement = "INSERT into %s (%s) VALUES (%s);" % (table, column_list, placeholder_list)
    cursor.execute(statement, record)
| [
37811,
3611,
6831,
5499,
13,
628,
220,
220,
220,
10358,
670,
351,
20137,
7824,
362,
13,
15,
13882,
669,
11,
6789,
351,
17331,
22163,
70,
17,
13,
198,
220,
220,
220,
3740,
1378,
2503,
13,
29412,
13,
2398,
14,
7959,
14,
431,
862,
14... | 2.673861 | 417 |
#!/usr/bin/env python
"""
CGNX API -> list sites, example proof of concept.
**Author:** CloudGenix
**Copyright:** (c) 2017-2021 CloudGenix, Inc
**License:** MIT
"""
__author__ = "CloudGenix Developer Support <developers@cloudgenix.com>"
__email__ = "developers@cloudgenix.com"
__copyright__ = "Copyright (c) 2017-2021 CloudGenix, Inc"
__license__ = """
MIT License
Copyright (c) 2017-2021 CloudGenix, Inc
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# standard modules
import argparse
import logging
# CloudGenix Python SDK
import cloudgenix
# alias JSON pretty printer (jd), and JSON Detailed pretty printer (jd_detailed) from cloudgenix SDK.
# jd prints a response as formatted JSON; jd_detailed also dumps request/response
# details (used in the error path at the bottom of this script).
jd = cloudgenix.jd
jd_detailed = cloudgenix.jd_detailed
# Global Vars
SDK_VERSION = cloudgenix.version  # version string of the installed cloudgenix SDK (shown in banner)
SCRIPT_NAME = 'CloudGenix Python SDK demo'  # used in argparse description and the login banner
# Module-level logger named after this module; its level is set from --debug below.
logger = logging.getLogger(__name__)
############################################################################
# Begin Script, parse arguments.
############################################################################

# Parse arguments
parser = argparse.ArgumentParser(description="{0}.".format(SCRIPT_NAME))

# Allow Controller modification and debug level sets.
# API group: where and how to connect (controller URI, TLS verification).
controller_group = parser.add_argument_group('API', 'These options change how this program connects to the API.')
controller_group.add_argument("--controller", "-C",
                              help="Controller URI, ex. https://api.cloudgenix.com:8443",
                              default=None)

# --insecure stores False into args['verify'] (store_false), default True.
controller_group.add_argument("--insecure", "-I", help="Disable SSL certificate and hostname verification",
                              dest='verify', action='store_false', default=True)

# Login group: optional credentials to skip the interactive prompt below.
login_group = parser.add_argument_group('Login', 'These options allow skipping of interactive login')
login_group.add_argument("--email", "-E", help="Use this email as User Name instead of prompting",
                         default=None)
login_group.add_argument("--pass", "-PW", help="Use this Password instead of prompting",
                         default=None)

# Debug group: 0 (default) = quiet, 1 = INFO, 2+ = DEBUG (see logging setup below).
debug_group = parser.add_argument_group('Debug', 'These options enable debugging output')
debug_group.add_argument("--debug", "-D", help="Verbose Debug info, levels 0-2", type=int,
                         default=0)

# Parsed arguments as a plain dict, e.g. args['controller'], args['debug'].
args = vars(parser.parse_args())
# Configure logging verbosity from the --debug flag:
#   1 -> INFO, 2 or more -> DEBUG, anything else -> WARNING (quiet default).
debug_format = "%(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s"
requested_debug = args['debug']
if requested_debug >= 2:
    logging.basicConfig(level=logging.DEBUG, format=debug_format)
    logger.setLevel(logging.DEBUG)
elif requested_debug == 1:
    logging.basicConfig(level=logging.INFO, format=debug_format)
    logger.setLevel(logging.INFO)
else:
    # Detach every handler from the root logger so nothing is emitted.
    for existing_handler in logging.root.handlers[:]:
        logging.root.removeHandler(existing_handler)
    # Default verbosity for normal runs.
    logger.setLevel(logging.WARNING)
############################################################################
# Instantiate API
############################################################################

# Create the SDK constrained to the requested controller; ssl_verify honors
# the --insecure flag parsed above.
sdk = cloudgenix.API(controller=args["controller"], ssl_verify=args["verify"])

# set debug: forward the --debug level into the SDK's own debug output.
sdk.set_debug(args["debug"])

############################################################################
# Draw Interactive login banner, run interactive login including args above.
############################################################################
print("{0} v{1} ({2})\n".format(SCRIPT_NAME, SDK_VERSION, sdk.controller))

# interactive or cmd-line specified initial login; loop until a tenant is
# resolved (tenant_name stays None while not logged in). Credentials from
# --email/--pass are used if given, otherwise the SDK prompts interactively.
while sdk.tenant_name is None:
    sdk.interactive.login(args["email"], args["pass"])
############################################################################
# End Login handling, begin script..
############################################################################

# Get list of sites for the logged-in tenant.
response = sdk.get.sites()

# cgx_status is a boolean based on success/failure. If success, print raw dictionary
if response.cgx_status:
    # Can Print as formatted JSON using json module using commented code below.
    # raw_sites_dict = response.cgx_content
    # print(json.dumps(raw_sites_dict, indent=4))

    # But CloudGenix has a built-in pretty printer, can just use that on native response as a shortcut.
    # Output is the same as code above.
    jd(response)

# else, let user know something didn't work.
else:
    print("ERROR: ")
    # the jd_detailed builtin pretty-printer will dump all request/response info, and also attempt to hide sensitive
    # cookies/headers (AUTH_TOKEN, X-Auth-Token, etc.) to be safe for log messages/etc.
    jd_detailed(response)

# end of script, run logout to clear session; print the raw logout response body.
print(sdk.get.logout().cgx_content)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
34,
16630,
55,
7824,
4613,
1351,
5043,
11,
1672,
6617,
286,
3721,
13,
198,
198,
1174,
13838,
25,
1174,
10130,
13746,
844,
198,
198,
1174,
15269,
25,
1174,
357,
66,
8,
217... | 3.18895 | 1,810 |