id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6485120 | """http://ginstrom.com/scribbles/2007/10/08/design-patterns-python-style/
An example of the Template pattern in Python"""
# Demo data shared by the getters and skeletons below.
ingredients = "spam eggs apple"
line = '-' * 10
# Skeletons
def iter_elements(getter, action):
    """Template skeleton: apply *action* to each item from *getter*, in order.

    A separator line is printed once the full pass is complete.
    """
    elements = getter()
    for element in elements:
        action(element)
    print(line)
def rev_elements(getter, action):
    """Template skeleton: apply *action* to the items from *getter* in reverse.

    A separator line is printed once the full pass is complete.
    """
    for element in reversed(getter()):
        action(element)
    print(line)
# Getters
def get_list():
    """Getter: return the ingredients as a list of words."""
    return ingredients.split()
def get_lists():
    """Getter: return each ingredient word as a list of its characters."""
    return [list(x) for x in ingredients.split()]
# Actions
def print_item(item):
    """Action: print the element unchanged."""
    print(item)
def reverse_item(item):
    """Action: print the element reversed (slicing works for both str and list)."""
    print(item[::-1])
# Makes templates
def make_template(skeleton, getter, action):
    """Bind *getter* and *action* into *skeleton*, returning a no-arg callable."""
    def bound():
        skeleton(getter, action)
    return bound
# Create our template functions
# 2 getters x 2 actions x 2 skeletons -> 8 bound template functions.
# The skeleton loop is innermost, so the plain and reversed variants of each
# getter/action combination run back to back (see the OUTPUT block below).
templates = [make_template(s, g, a)
             for g in (get_list, get_lists)
             for a in (print_item, reverse_item)
             for s in (iter_elements, rev_elements)]
# Execute every generated template in order.
for template in templates:
    template()
### OUTPUT ###
# spam
# ----------
# eggs
# ----------
# apple
# ----------
# apple
# ----------
# eggs
# ----------
# spam
# ----------
# maps
# ----------
# sgge
# ----------
# elppa
# ----------
# elppa
# ----------
# sgge
# ----------
# maps
# ----------
# ['s', 'p', 'a', 'm']
# ----------
# ['e', 'g', 'g', 's']
# ----------
# ['a', 'p', 'p', 'l', 'e']
# ----------
# ['a', 'p', 'p', 'l', 'e']
# ----------
# ['e', 'g', 'g', 's']
# ----------
# ['s', 'p', 'a', 'm']
# ----------
# ['m', 'a', 'p', 's']
# ----------
# ['s', 'g', 'g', 'e']
# ----------
# ['e', 'l', 'p', 'p', 'a']
# ----------
# ['e', 'l', 'p', 'p', 'a']
# ----------
# ['s', 'g', 'g', 'e']
# ----------
# ['m', 'a', 'p', 's']
# ----------
| StarcoderdataPython |
1676946 | def extract_full_names(people):
"""Return list of names, extracting from first+last keys in people dicts.
- people: list of dictionaries, each with 'first' and 'last' keys for
first and last names
Returns list of space-separated first and last names.
>>> names = [
... {'first': 'Ada', 'last': 'Lovelace'},
... {'first': 'Grace', 'last': 'Hopper'},
... ]
>>> extract_full_names(names)
['<NAME>', '<NAME>']
"""
t = [("%s %s" % (x['first'], x['last'])) for x in people]
return t | StarcoderdataPython |
6623922 | class Rat(object):
n : int = 0
d : int = 0
def __init__(self : Rat):
    # No-arg constructor: n/d keep their class-level defaults (0, 0).
    # Use new() to populate a freshly created instance.
    pass
def new(self : Rat, n : int, d : int) -> Rat:
    # Initializer-style setter: store numerator/denominator and return self
    # so calls can be chained, e.g. Rat().new(4, 5).
    # NOTE(review): bare `Rat` annotations inside the class body suggest
    # ChocoPy-style source; plain CPython would need quoted annotations here.
    self.n = n
    self.d = d
    return self
def mul(self : Rat, other : Rat) -> Rat:
    # Rational multiplication: (a/b)*(c/d) = (a*c)/(b*d). Returns a new
    # instance; neither operand is mutated. The result is not reduced.
    return Rat().new(self.n * other.n, self.d * other.d)
# Demo driver: (4/5) * (2/3) * (2/3); the printed numerator is 4*2*2 = 16.
r1 : Rat = None
r2 : Rat = None
r1 = Rat().new(4, 5)
r2 = Rat().new(2, 3)
print(r1.mul(r2).mul(r2).n) | StarcoderdataPython |
6434020 | <gh_stars>1-10
from sklearn.datasets import load_breast_cancer
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
# Binary-classification demo: linear SVM on the breast-cancer dataset.
X, y = load_breast_cancer(return_X_y=True)
classes = ['Malignant', 'Benign']  # display names for the 0/1 targets
# Stratified 70/30 split keeps the class balance identical in both halves.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42, stratify=y)
model = SVC(kernel='linear', gamma="auto")
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# Fix: pass y_true first, matching sklearn's (y_true, y_pred) convention and
# the classification_report/confusion_matrix calls below. Accuracy itself is
# symmetric, so the printed value is unchanged.
print(f"Accuracy: {accuracy_score(y_test, y_pred)}")
print(f"Classification report: {classification_report(y_test, y_pred)}")
print(f"Confusion Matrix design:\n{confusion_matrix(y_test, y_pred)}")
| StarcoderdataPython |
12622 | <reponame>florianm/biosys
from django.shortcuts import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from main.models import Project
from main.tests import factories
from main.tests.api import helpers
class TestWhoAmI(helpers.BaseUserTestCase):
    """Tests for the 'whoami' endpoint: payload contents and allowed verbs."""

    def setUp(self):
        super(TestWhoAmI, self).setUp()
        self.url = reverse('api:whoami')

    def test_get(self):
        # The endpoint answers 200 even without authentication.
        client = self.anonymous_client
        self.assertEqual(
            client.get(self.url).status_code,
            status.HTTP_200_OK
        )
        user = factories.UserFactory()
        user.set_password('password')
        user.save()
        client = APIClient()
        self.assertTrue(client.login(username=user.username, password='password'))
        resp = client.get(self.url)
        self.assertEqual(
            resp.status_code,
            status.HTTP_200_OK
        )
        # test that the response contains username, first and last name and email at least and the id
        data = resp.json()
        self.assertEqual(user.username, data['username'])
        self.assertEqual(user.first_name, data['first_name'])
        self.assertEqual(user.last_name, data['last_name'])
        self.assertEqual(user.email, data['email'])
        self.assertEqual(user.id, data['id'])
        # test that the password is not in the returned fields
        self.assertFalse('password' in data)

    def test_not_allowed_methods(self):
        # Only GET is supported; mutating verbs must return 405.
        client = self.readonly_client
        self.assertEqual(
            client.post(self.url, {}).status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED
        )
        self.assertEqual(
            client.put(self.url, {}).status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED
        )
        self.assertEqual(
            client.patch(self.url, {}).status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED
        )
class TestStatistics(TestCase):
    """Tests for the aggregate 'statistics' endpoint (counts per entity type)."""

    def setUp(self):
        self.url = reverse('api:statistics')

    def test_get(self):
        # Anonymous access must be rejected (401 or 403 depending on auth setup).
        anonymous = APIClient()
        client = anonymous
        self.assertIn(
            client.get(self.url).status_code,
            [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]
        )
        user = factories.UserFactory.create()
        user.set_password('password')
        user.save()
        client = APIClient()
        self.assertTrue(client.login(username=user.username, password='password'))
        resp = client.get(self.url)
        self.assertEqual(
            resp.status_code,
            status.HTTP_200_OK
        )
        # expected response with no data
        expected = {
            'projects': {'total': 0},
            'datasets': {
                'total': 0,
                'generic': {'total': 0},
                'observation': {'total': 0},
                'speciesObservation': {'total': 0},
            },
            'records': {
                'total': 0,
                'generic': {'total': 0},
                'observation': {'total': 0},
                'speciesObservation': {'total': 0},
            },
            'sites': {'total': 0},
        }
        self.assertEqual(expected, resp.json())
        # create one project
        program = factories.ProgramFactory.create()
        project = factories.ProjectFactory.create(program=program)
        expected['projects']['total'] = 1
        resp = client.get(self.url)
        self.assertEqual(
            resp.status_code,
            status.HTTP_200_OK
        )
        self.assertEqual(expected, resp.json())
        # create some sites
        count = 3
        factories.SiteFactory.create_batch(
            count,
            project=project
        )
        expected['sites']['total'] = count
        resp = client.get(self.url)
        self.assertEqual(
            resp.status_code,
            status.HTTP_200_OK
        )
        self.assertEqual(expected, resp.json())

    def test_not_allowed_methods(self):
        # Statistics are read-only: mutating verbs must 405 even when logged in.
        user = factories.UserFactory.create()
        user.set_password('password')
        user.save()
        client = APIClient()
        self.assertTrue(client.login(username=user.username, password='password'))
        self.assertEqual(
            client.post(self.url, {}).status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED
        )
        self.assertEqual(
            client.put(self.url, {}).status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED
        )
        self.assertEqual(
            client.patch(self.url, {}).status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED
        )
class TestSpecies(TestCase):
    """Tests for the species lookup endpoint."""

    # set the species list to be the testing one
    species_facade_class = helpers.LightSpeciesFacade

    def setUp(self):
        # Patch the view's facade so tests never hit the real species service.
        from main.api.views import SpeciesMixin
        SpeciesMixin.species_facade_class = self.species_facade_class
        self.url = reverse('api:species')

    def test_get(self):
        # Species data is public: anonymous GET succeeds.
        anonymous = APIClient()
        client = anonymous
        self.assertEqual(
            client.get(self.url).status_code,
            status.HTTP_200_OK
        )
        user = factories.UserFactory.create()
        user.set_password('password')
        user.save()
        client = APIClient()
        self.assertTrue(client.login(username=user.username, password='password'))
        resp = client.get(self.url)
        self.assertEqual(
            resp.status_code,
            status.HTTP_200_OK
        )

    def test_not_allowed_methods(self):
        # Read-only endpoint: mutating verbs must return 405.
        user = factories.UserFactory.create()
        user.set_password('password')
        user.save()
        client = APIClient()
        self.assertTrue(client.login(username=user.username, password='password'))
        self.assertEqual(
            client.post(self.url, {}).status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED
        )
        self.assertEqual(
            client.put(self.url, {}).status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED
        )
        self.assertEqual(
            client.patch(self.url, {}).status_code,
            status.HTTP_405_METHOD_NOT_ALLOWED
        )
| StarcoderdataPython |
6591731 | from django.contrib import admin
from teacher.models import Teacher
from django.contrib.auth.admin import UserAdmin
class TeacherAdmin(UserAdmin):
    """Admin configuration for the custom Teacher user model.

    The empty filter/fieldset tuples deliberately override UserAdmin's
    defaults, which reference fields the custom user model does not define.
    """
    list_display = ('email','username','date_joined','is_admin','is_staff')
    search_fields = ('email','username')
    readonly_fields = ('date_joined','last_login')
    filter_horizontal = ()
    list_filter = ()
    fieldsets = ()
admin.site.register(Teacher,TeacherAdmin) | StarcoderdataPython |
1675221 | import collections
import re
# Record types for the fields parsed out of ffmpeg/libvmaf output lines.
Progress = collections.namedtuple('Progress', [
    'frame', 'fps', 'size', 'time', 'bitrate', 'speed'
])
Progress_psnr = collections.namedtuple('Progress_psnr',['PSNR'])
Progress_vmaf = collections.namedtuple('Progress_vmaf',['VMAF'])
Progress_ssim = collections.namedtuple('Progress_ssim',['SSIM'])
Resolution = collections.namedtuple('Resolution',['resolution'])
# key=value pairs from ffmpeg's periodic progress line, e.g. "frame= 123 fps= 25".
progress_pattern = re.compile(
    r'(frame|fps|size|time|bitrate|speed)\s*\=\s*(\S+)'
)
# "VMAF score: <value>" summary line.
vmaf_pattern = re.compile(
    r'(VMAF)\s*score:\s*(\S+)'
)
# PSNR summary line; captures the "average:" value.
psnr_pattern = re.compile(
    r'(PSNR)\s*.*average:\s*(\S+)'
)
# SSIM summary line; captures the "All:" value.
ssim_pattern = re.compile(
    r'(SSIM)\s*.*All:\s*(\S+)'
)
# Stream-info line, e.g. "Video: ... 1920x1080"; captures the WxH token.
resolution_pattern = re.compile(
    r'(Video).* ([0-9]+x[0-9]+)'
)
def build_options(options):
    """Flatten an options mapping into an ffmpeg-style argument list.

    Each key gains a single leading dash (an existing dash is not doubled);
    a value of None produces a bare flag, any other value is appended as
    str(value) right after its flag.
    """
    args = []
    for key, value in options.items():
        flag = key[1:] if key.startswith('-') else key
        args.append('-{key}'.format(key=flag))
        if value is not None:
            args.append(str(value))
    return args
async def readlines(stream):
    """Yield complete lines (without terminators) from an asyncio stream.

    Data is read in 1 KiB chunks and split on any run of CR/LF bytes; a
    trailing partial line stays buffered until it is completed.

    Fix: the original dropped whatever was still buffered when EOF was
    reached (i.e. a final line with no trailing newline); the remainder is
    now flushed after the read loop.
    """
    pattern = re.compile(br'[\r\n]+')
    data = bytearray()
    while not stream.at_eof():
        lines = pattern.split(data)
        # Keep the last (possibly incomplete) piece in the buffer.
        data[:] = lines.pop(-1)
        for line in lines:
            yield line
        data.extend(await stream.read(1024))
    # Flush whatever remains after EOF, splitting any buffered newlines.
    for line in pattern.split(data):
        if line:
            yield line
def parse_progress(line):
    """Parse one line of ffmpeg/libvmaf output into a typed record.

    Returns, depending on the line's content:
      - Progress_ssim / Progress_psnr / Progress_vmaf for metric summary lines,
      - Resolution for a stream-info "Video:" line,
      - Progress for ffmpeg's periodic key=value progress line,
      - None when the line matches nothing.
    """
    items = {
        key: value for key, value in progress_pattern.findall(line)
    }
    items_psnr = {}
    items_vmaf = {}
    items_ssim = {}
    # Metric/stream lines are checked first; they would not populate `items`.
    if "SSIM" in line:
        items_ssim = {key:value for key, value in ssim_pattern.findall(line)}
        return Progress_ssim(SSIM=items_ssim['SSIM'])
    if "PSNR" in line:
        items_psnr = {key:value for key, value in psnr_pattern.findall(line)}
        return Progress_psnr(PSNR=items_psnr['PSNR'])
    if "VMAF" in line:
        items_vmaf = {key:value for key, value in vmaf_pattern.findall(line)}
        return Progress_vmaf(VMAF=items_vmaf['VMAF'])
    if "Video:" in line:
        items_resolution = {key:value for key, value in resolution_pattern.findall(line)}
        return Resolution(resolution=items_resolution['Video'])
    if not items:
        return None
    # ffmpeg prints "N/A" for size/bitrate early in a run; treat it as zero.
    if "N" in items['size']:
        items['size'] = "0"
    if "N" in items['bitrate']:
        items['bitrate'] = "0"
    return Progress(
        frame=int(items['frame']),
        fps=float(items['fps']),
        size=int(items['size'].replace('kB', '')) * 1024,
        time=items['time'],
        bitrate=float(items['bitrate'].replace('kbits/s', '')),
        speed=float(items['speed'].replace('x', '')),
    )
| StarcoderdataPython |
9786107 | <filename>evaluatewindow.py
import sqlite3
from PyQt5 import QtCore, QtGui, QtWidgets
myteam= sqlite3.connect("TEAM.db")
class Ui_EvaluateWindow(object):
    """Qt-Designer-style UI builder for the team evaluation window."""

    def setupUi(self, EvaluateWindow):
        """Create and lay out all widgets on *EvaluateWindow*."""
        EvaluateWindow.setObjectName("EvaluateWindow")
        EvaluateWindow.resize(699, 509)
        self.centralwidget = QtWidgets.QWidget(EvaluateWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Side-by-side lists: players on the left, their scores on the right.
        self.horizontalLayoutWidget = QtWidgets.QWidget(self.centralwidget)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(90, 180, 521, 251))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.PlayerList = QtWidgets.QListWidget(self.horizontalLayoutWidget)
        self.PlayerList.setObjectName("listView")
        self.horizontalLayout.addWidget(self.PlayerList)
        self.ScoreList = QtWidgets.QListWidget(self.horizontalLayoutWidget)
        self.ScoreList.setObjectName("listView_2")
        self.horizontalLayout.addWidget(self.ScoreList)
        # Header row above the two lists.
        self.horizontalLayoutWidget_2 = QtWidgets.QWidget(self.centralwidget)
        self.horizontalLayoutWidget_2.setGeometry(QtCore.QRect(100, 110, 501, 51))
        self.horizontalLayoutWidget_2.setObjectName("horizontalLayoutWidget_2")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget_2)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        #label players
        self.Players = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(9)
        font.setBold(True)
        font.setWeight(75)
        self.Players.setFont(font)
        self.Players.setAlignment(QtCore.Qt.AlignCenter)
        self.Players.setIndent(10)
        self.Players.setObjectName("Players")
        self.horizontalLayout_2.addWidget(self.Players)
        #label points
        self.Points = QtWidgets.QLabel(self.horizontalLayoutWidget_2)
        font = QtGui.QFont()
        font.setPointSize(9)
        font.setBold(True)
        font.setWeight(75)
        self.Points.setFont(font)
        self.Points.setAlignment(QtCore.Qt.AlignCenter)
        self.Points.setIndent(10)
        self.Points.setObjectName("Points")
        self.horizontalLayout_2.addWidget(self.Points)
        #PUSHBUTTON
        self.Calculate = QtWidgets.QPushButton(self.centralwidget)
        self.Calculate.setGeometry(QtCore.QRect(300, 440, 93, 28))
        self.Calculate.setObjectName("Calculate")
        #combox 1
        self.SelectTeam = QtWidgets.QComboBox(self.centralwidget)
        self.SelectTeam.setGeometry(QtCore.QRect(150, 50, 141, 22))
        self.SelectTeam.setObjectName("SelectTeam")
        #combox2
        self.Selectmatch = QtWidgets.QComboBox(self.centralwidget)
        self.Selectmatch.setGeometry(QtCore.QRect(400, 50, 141, 22))
        self.Selectmatch.setObjectName("Selectmatch")
        # Placeholder entries; the visible text is assigned in retranslateUi.
        self.SelectTeam.addItem("")
        self.Selectmatch.addItem("")
        self.Selectmatch.addItem("")
        self.Selectmatch.addItem("")
        self.Selectmatch.addItem("")
        self.Selectmatch.addItem("")
        self.line = QtWidgets.QFrame(self.centralwidget)
        self.line.setGeometry(QtCore.QRect(60, 80, 591, 20))
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        EvaluateWindow.setCentralWidget(self.centralwidget)
        self.statusbar = QtWidgets.QStatusBar(EvaluateWindow)
        self.statusbar.setObjectName("statusbar")
        EvaluateWindow.setStatusBar(self.statusbar)
        self.retranslateUi(EvaluateWindow)
        QtCore.QMetaObject.connectSlotsByName(EvaluateWindow)

    def retranslateUi(self, EvaluateWindow):
        """Set user-visible strings and fill the team combo from TEAM.db."""
        _translate = QtCore.QCoreApplication.translate
        EvaluateWindow.setWindowTitle(_translate("EvaluateWindow", "MainWindow"))
        self.Players.setText(_translate("EvaluateWindow", "Players"))
        self.Points.setText(_translate("EvaluateWindow", "Points"))
        self.Calculate.setText(_translate("EvaluateWindow", "Calculate"))
        self.SelectTeam.setItemText(0,_translate('EvaluateWindow', "--SELECT TEAM--"))
        self.Selectmatch.setItemText(0,_translate('EvaluateWindow', "--SELECT MATCH--"))
        self.Selectmatch.setItemText(1,_translate('EvaluateWindow', "Match1"))
        self.Selectmatch.setItemText(2,_translate('EvaluateWindow', "Match2"))
        self.Selectmatch.setItemText(3,_translate('EvaluateWindow', "Match3"))
        self.Selectmatch.setItemText(4,_translate('EvaluateWindow', "Match4"))
        # Populate the team combo with distinct team names from the database.
        cu= myteam.cursor()
        cu.execute("SELECT NAMES from team;")
        team= cu.fetchall()
        teamlist=[]
        for i in range(len(team)):
            teamlist.append(team[i][0])
        for name in set(teamlist):
            self.SelectTeam.addItem(name)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
EvaluateWindow = QtWidgets.QMainWindow()
ui = Ui_EvaluateWindow()
ui.setupUi(EvaluateWindow)
EvaluateWindow.show()
sys.exit(app.exec_())
| StarcoderdataPython |
3246782 | """Fun(c) with dates"""
# %% Imports
from datetime import datetime, timedelta
from typing import List
# %% Mondays
def mondays_between_two_dates(start: datetime, end: datetime) -> List[str]:
    """Return the dates of each Monday between two dates, inclusive.

    :param start: start date (datetime).
    :param end: end date (datetime).
    :return: ISO-formatted (YYYY-MM-DD) dates of Mondays in [start, end].
    """
    mon_dates = []
    # Advance to the first Monday on or after `start`: weekday() is 0 for
    # Monday, so (7 - weekday) % 7 days must be added (0 when the start date
    # is already a Monday). Replaces the original ((7*w) + (7-w)) % 7, which
    # computes the same value far less readably; the dead commented-out
    # alternative has been removed.
    start += timedelta((7 - start.weekday()) % 7)
    while start <= end:  # step a week at a time until past the end date
        mon_dates.append(start.strftime("%Y-%m-%d"))
        start += timedelta(7)
    return mon_dates
print("Start on a monday; end on a Wednesday:")
print(mondays_between_two_dates(datetime(2018, 1, 1), datetime(2018, 1, 31)))
print("\nStart on a Wednesday; end on a Monday:")
print(mondays_between_two_dates(datetime(2018, 1, 3), datetime(2018, 1, 29)))
# %% Mondays
def monday_gen(start: datetime, end: datetime) -> List[str]:
    """Generator-backed variant of mondays_between_two_dates, just for fun."""
    start += timedelta(((7 * start.weekday()) + (7 - start.weekday())) % 7)

    def weekly(cursor, stop):
        while cursor <= stop:
            yield cursor.strftime("%Y-%m-%d")
            cursor += timedelta(7)

    return list(weekly(start, end))
print("Start on a monday; end on a Wednesday:")
print(monday_gen(datetime(2018, 1, 1), datetime(2018, 1, 31)))
print("\nStart on a Wednesday; end on a Monday:")
print(monday_gen(datetime(2018, 1, 3), datetime(2018, 1, 29)))
# %% Separate / unnested generator func version
# Note: Not typed quite right - it "returns" a generator
def monday_generator(start: datetime, end: datetime, sep: str = "-") -> str:
    """Yield the Mondays between two dates, inclusive.

    *sep* separates the Y/M/D fields of each yielded date string.
    """
    offset = ((7 * start.weekday()) + (7 - start.weekday())) % 7
    current = start + timedelta(offset)
    while current <= end:
        yield current.strftime(f"%Y{sep}%m{sep}%d")
        current += timedelta(7)
print("Start on a monday; end on a Wednesday:")
print([m for m in monday_generator(datetime(2018, 1, 1), datetime(2018, 1, 31))])
print("\nStart on a Wednesday; end on a Monday:")
print([m for m in monday_generator(datetime(2018, 1, 3), datetime(2018, 1, 29), "/")])
# %% Parsing datetime strings
print(datetime.fromisoformat("2018-01-01"))
# %%
def monday_genr(start, end, sep: str = "-") -> str:
"""Generator function for Mondays between two dates, inclusive.
Accepts ISO-formatted strings or datetime objects.
"""
if type(start) == str:
start = datetime.fromisoformat(start)
if type(end) == str:
end = datetime.fromisoformat(end)
start += timedelta(((7 * start.weekday()) + (7 - start.weekday())) % 7)
while start <= end:
yield start.strftime(f"%Y{sep}%m{sep}%d")
start += timedelta(7)
print("Start on a monday; end on a Wednesday (string):")
print([m for m in monday_genr("2018-01-01", "2018-01-31")])
print("\nStart on a Wednesday; end on a Monday (datetime):")
print([m for m in monday_genr(datetime(2018, 1, 3), datetime(2018, 1, 29), "/")])
# %% Weekly date generator
def day_date_generator(start, end, sep: str = "-") -> str:
"""Generator function for weekly dates between two dates.
Accepts date strings or datetime objects.
"""
if type(start) == str:
start = datetime.fromisoformat(start.replace("/", "-"))
if type(end) == str:
end = datetime.fromisoformat(end.replace("/", "-"))
while start <= end:
yield start.strftime(f"%Y{sep}%m{sep}%d")
start += timedelta(7) # Weekly increments
| StarcoderdataPython |
12859639 | """
WSGI config for pycess project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os

# Must be set before any Django machinery is imported.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pycess.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
import pycess.settings
# Optionally wrap the app in dj-static's Cling so static files are served
# straight from WSGI (opt-in via the USE_DJ_STATIC setting).
if hasattr(pycess.settings, 'USE_DJ_STATIC') and pycess.settings.USE_DJ_STATIC:
    from dj_static import Cling
    application = Cling(get_wsgi_application())
| StarcoderdataPython |
3354966 | from dataclasses import dataclass
from dataclasses_json import dataclass_json
from typing import Optional
from .texture_info import TextureInfo
@dataclass_json
@dataclass
class NormalTextureInfo(TextureInfo):
    """
    Material Normal Texture Info
    Properties:
    index (integer) The index of the texture. (Required)
    texCoord (integer) The set index of texture's TEXCOORD attribute used for texture coordinate mapping. (Optional,
    default: 0)
    scale (number) The scalar multiplier applied to each normal vector of the normal texture. (Optional, default: 1)
    extensions (object) Dictionary object with extension-specific objects. (Optional)
    extras (any) Application-specific data. (Optional)
    """
    # None means "absent from the JSON"; per the docstring above consumers
    # should then fall back to the default scale of 1.
    scale: Optional[float] = None
| StarcoderdataPython |
11320179 | import os
import sys
from datetime import datetime
class Logger(object):
    """Logs messages to a date-stamped log file and echoes them to stdout."""

    def __init__(self, name, save_dir=None, attr=None, ovrw=False):
        """Create the logger and resolve the log-file path.

        :param name: base name for the log file.
        :param save_dir: folder to save the log in (created if missing;
            falls back to the cwd when it cannot be created).
        :param attr: optional extra tag inserted into the file name.
        :param ovrw: if True, delete any existing log file first.
        """
        name = str(name)
        # One strftime call zero-pads month/day and avoids the (unlikely)
        # midnight rollover between separate year/month/day reads.
        today = datetime.today().strftime('%Y-%m-%d')
        if attr:
            log_name = '_'.join([name, str(attr), today])
        else:
            log_name = '_'.join([name, today])
        log_name = ''.join([log_name, '.log'])
        if save_dir:
            log_path = os.path.realpath(str(save_dir))
            if not os.path.exists(log_path):
                try:
                    os.makedirs(log_path)
                except OSError:  # was a bare except; only FS errors expected
                    print('Could not make log in {}, making it in cwd'
                          .format(log_path))
                    log_path = os.getcwd()
        else:
            log_path = os.getcwd()
        self.log_path = os.path.join(log_path, log_name)
        if ovrw:
            # Fix: unconditional unlink() raised FileNotFoundError when no
            # previous log existed; only remove a file that is actually there.
            if os.path.exists(self.log_path):
                os.unlink(self.log_path)
            self.__call__('Log Overwritten')

    def name(self):
        """Return just the file name of the log (no directory)."""
        return os.path.split(self.log_path)[1]

    def __call__(self, msg, ex=False, exit_code=-1):
        """Write *msg* to the log file and stdout.

        :param msg: message to log.
        :param ex: whether to exit the process after logging.
        :param exit_code: process exit status, unused if not exiting.
        """
        msg = ''.join([str(msg), '\n'])
        sys.stdout.write(msg)
        sys.stdout.flush()
        now = datetime.now().strftime("%X")
        with open(self.log_path, 'a') as log:
            log.write(' -> '.join([now, msg]))
            log.flush()
        if ex:
            exit_message = 'Exiting with code: {}\n'.format(exit_code)
            sys.stdout.write(exit_message)
            sys.stdout.flush()
            with open(self.log_path, 'a') as log:
                log.write(' -> '.join([now, exit_message]))
                log.flush()
            sys.exit(exit_code)
def main():
    # Placeholder entry point; this module is intended to be imported.
    pass


if __name__ == '__main__':
    main()
| StarcoderdataPython |
6429627 | from fidimag.common import CuboidMesh, constant
from fidimag.micro import UniformExchange, Sim, Zeeman
import numpy as np
def test_add_remove_interaction_simple():
    """Energy terms can be added to and removed from a Sim by name.

    Relaxes with Exchange + Zeeman, removes the Zeeman term, continues the
    run, then checks the logged Zeeman energy column is numerically zero.
    """
    mesh = CuboidMesh(nx=10, ny=10, nz=10, unit_length=1e-9)
    name = 'test_add_remove_intn_simple'
    sim = Sim(mesh, name=name)
    sim.set_m(lambda pos: (0, 0, 1))
    sim.set_Ms(5.8e5)
    exch = UniformExchange(A=1e-11, name='Exchange')
    zee = Zeeman((0, 0, 0.05 / constant.mu_0), name='Zeeman')
    sim.add(exch)
    sim.add(zee)
    sim.driver.run_until(1e-9)
    # Interactions are addressed by the name they were registered under.
    sim.remove('Zeeman')
    sim.driver.run_until(2e-9)
    # Parse the data table the driver wrote next to the simulation.
    # NOTE(review): this handle is never closed, and first_data/last_data
    # both read lines[2] (first_data is unused) -- last_data probably ought
    # to be the final data row; confirm against the table format.
    f = open(name + '.txt')
    lines = f.read().split('\n')
    headers = lines[0].split()
    first_data = lines[2].split()
    last_data = lines[2].split()
    # Find the position in the data table
    position = headers.index('E_Zeeman')
    assert np.abs(float(last_data[position])) < 1e-15
| StarcoderdataPython |
6445138 | import sys
# Usage: python script.py <src> <tgt>
# Reads a Kaldi-style "utt-id transcript ..." file and rewrites each
# transcript with every character space-separated (char-level tokenisation).
src = sys.argv[1]
tgt = sys.argv[2]
# `with` closes both files even on error (the original leaked the output
# handle if an exception occurred mid-loop).
with open(src, 'r', encoding='utf8') as fin, open(tgt, 'w', encoding='utf8') as fw:
    for line in fin:
        items = line.strip().split()
        if not items:
            continue  # skip blank lines instead of crashing on items[0]
        utt = items[0]
        # Fuse the remaining fields, then split into individual characters.
        txt = " ".join(list("".join(items[1:])))
        fw.write(utt + " " + txt + '\n')
| StarcoderdataPython |
6509274 | <reponame>nikitagoel7/Client-Server-Chatting-Application<gh_stars>0
# Python TCP Client A
import socket
host = socket.gethostname()
port = 64000
BUFFER_SIZE = 2048
MESSAGE = input("tcpClientA: Enter message/ Enter exit:")
tcpClientA = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcpClientA.connect((host, port))
while MESSAGE != 'exit':
tcpClientA.send(MESSAGE.encode('utf-8') )
data = tcpClientA.recv(BUFFER_SIZE)
print (" Client2 received data:", data.decode('utf-8'))
MESSAGE = input("tcpClientA: Enter message to continue/ Enter exit:")
tcpClientA.close()
| StarcoderdataPython |
1910062 | """
This file provides some simple tools to simulate and work with a tic tac toe board.
The functions provided assume the board is represented by
- np array of shape 3,3
- player 1 is 1, player 2 is -1 and empty is 0
"""
import logging
import itertools
import numpy as np
import gym
from gym import spaces, error
from typing import Any, Tuple, Dict, List, Optional
logger = logging.getLogger(__name__)
def policy_page_lines(board: np.array, player: int) -> Tuple[int, int]:
    """Return the first empty coordinate, scanning left-to-right, top-to-bottom.

    This naive policy simply fills the board from the top-left corner.

    Args:
        board: 3x3 array with entries 0 (empty), 1 (player 1), -1 (player 2).
        player: 1 or -1; unused by this policy, kept for the common signature.

    Returns: (row, col) of the first unfilled square (None if the board is full).
    """
    n_rows, n_cols = board.shape
    for r in range(n_rows):
        for c in range(n_cols):
            if board[r, c] == 0:
                return (r, c)
def check_win(board: np.array) -> int:
    """Return the player encoded in *board* who has three in a row, if any.

    Args:
        board (np.array): 3x3 board with entries 0 (empty), 1 (player 1),
            -1 (player 2).

    Returns:
        int: 1 for player 1, -1 for player 2, 0 when nobody has won yet.
    """
    assert board.shape == (3, 3)
    # Gather the sums of every winning line: columns, rows, both diagonals.
    line_sums = list(board.sum(axis=0)) + list(board.sum(axis=1))
    line_sums.append(board.diagonal().sum())
    line_sums.append(np.fliplr(board).diagonal().sum())
    if 3 in line_sums:
        return 1
    if -3 in line_sums:
        return -1
    # no winner yet
    return 0
def to_one_hot(board: np.array) -> np.array:
    """One-hot encode a 3x3 board into a 3x3x3 int array.

    Channel 0 marks empty squares, channel 1 player 1's moves, and channel 2
    player 2's moves.

    Args:
        board (np.array): 3x3 board with entries 0, 1, -1.

    Returns:
        np.array: int array of shape (3, 3, 3).
    """
    channels = (board == 0, board == 1, board == -1)
    return np.stack(channels).astype(int)
def to_human(board: np.array, symbols) -> np.array:
    """Render the numeric board as an array of display characters.

    *symbols* maps board values (e.g. 1, -1) to single-character strings;
    squares whose value has no mapping are rendered as a space.
    """
    rendered = np.full(board.shape, " ")
    for value, symbol in symbols.items():
        rendered[board == value] = symbol
    return rendered
class TicTacToeEnv(gym.Env):
    """
    TicTacToe environment in the openai gym style: https://gym.openai.com/docs/
    """
    # openai gym api - can also have rgb (for things like atari games) or ansi (text)
    metadata = {"render.modes": ["human"]}
    # constants that define the game's implementation
    TURN_ORDER = (1, -1)
    BOARD_SHAPE = 3, 3
    SYMBOLS = {1: "X", -1: "O"}

    def __init__(self):
        # open AI Gym API
        # necessary to set these to allow for easy network architecture
        # space of the actions - in this case the coordinate of the board to play
        self.action_space = spaces.Tuple([spaces.Discrete(3), spaces.Discrete(3)])
        # how are the observations represented. Since we return the board, we're returning
        # a discrete 3x3 matrix where each entry is {-1, 0, 1}.
        # this doesn't have a nice gym.spaces representation so we leave it unfilled for now
        self.observation_space = None
        # state representation variables. We define them here and set them in reset
        self.board = None
        self.turn_iterator = None
        self.curr_turn = None
        self.done = None
        # reset does the initalisation
        self.reset()

    def reset(self) -> np.array:
        # Fresh empty board; player 1 (value 1) always moves first.
        self.board = np.zeros(self.BOARD_SHAPE, dtype=int)
        self.turn_iterator = itertools.cycle(self.TURN_ORDER)
        self.curr_turn = next(self.turn_iterator)
        self.done = False
        return self._get_obs()

    def _get_obs(self) -> np.array:
        """
        Abstracted the observation from the underlying state though in this case they're
        identical. This is a common pattern in most third party gym environments.
        This makes changing the state output as simple as a subclass that overrides this function
        as well as the action_space/observation space as opposed to the more onerous gym wrapper
        Returns:
            np.array of 3x3 representing the board in it's default state
        """
        return self.board

    def step(
        self, action: Tuple[int, int], player: Optional[int] = None
    ) -> Tuple[Any, float, bool, Dict]:
        """
        Args:
            action: locaton we
            player: In more complex environments, we'll want to ensure we're not playing as the
                the same player twice. This provides a way of checking we're not breaking
                order by mistake
        Returns:
            observation, reward, done, info
        """
        # check the action is valid and the game isn't over
        action = tuple(action)
        if self.board[action] != 0:
            raise error.InvalidAction(f"action {action} is not a vaid choice")
        if self.done:
            raise error.ResetNeeded("Call reset as game is over")
        if player and player != self.curr_turn:
            raise error.InvalidAction(
                f"Player {self.curr_turn}'s turn. Move request from {player}"
            )
        logger.debug("Selected action: %s on turn %d", action, self.turns_played + 1)
        # set the location on the board to the current player. Since curr_turn
        # and current player use the same indicator, we just use that
        self.board[action] = self.curr_turn
        # check if the game is over. Reward is player that won (1 or -1)
        reward = check_win(self.board)
        if reward:
            self.done = True
            return self._get_obs(), float(reward), self.done, {}
        # check if the game is over (i.e. no more turns). Since we don't have a win
        # it must be a draw
        if self.turns_played == 9:
            self.done = True
            return self._get_obs(), 0.0, self.done, {}
        # otherwise game is still going. Advance turn and return state + no reward
        self.curr_turn = next(self.turn_iterator)
        return self._get_obs(), 0.0, self.done, {}

    @property
    def valid_actions(self) -> List[Tuple[int, int]]:
        # Coordinates of every empty square.
        return [tuple(act) for act in np.argwhere(self.board == 0).tolist()]

    @property
    def turns_played(self) -> int:
        # Number of non-empty squares == number of moves made so far.
        return np.sum(self.board != 0)

    def render(self, mode="human"):
        # Pretty-print the board as an ASCII grid (requires `tabulate`).
        import tabulate
        tabulate.PRESERVE_WHITESPACE = True
        human_board = to_human(self.board, self.SYMBOLS)
        print("\n")
        print(f"Turn : {self.turns_played}")
        print(tabulate.tabulate(human_board.tolist(), tablefmt="fancy_grid"))
| StarcoderdataPython |
6628473 | <gh_stars>1-10
from makepy.shell import run, rm
import logging
log = logging.getLogger(__name__)
def tox(envlist=None):
    """Run tox, optionally restricted to the given '-e' environment list."""
    log.info('starting tox tests for envlist: %s', envlist)
    cmd = ['tox'] if envlist is None else ['tox', '-e', envlist]
    run(cmd)
def clean():
    """Remove the cached .tox directory."""
    rm('.tox')
| StarcoderdataPython |
101970 | <gh_stars>1-10
import json
from datetime import datetime
from bson import ObjectId
from dateutil import parser
class Project:
    """Plain data holder for a project, with dict/JSON serialisation."""

    def __init__(
        self,
        name,
        city,
        description,
        start_time,
        end_time,
    ):
        """Store the fields; start/end times are normalised to strings."""
        self.name = name
        self.city = city
        self.description = description
        self.start_time = str(start_time)
        self.end_time = str(end_time)

    def to_dict(self):
        """Return the instance __dict__ with start/end parsed to datetimes.

        Fix: the original re-parsed the attributes unconditionally, so a
        second call handed already-converted datetimes to dateutil's parser
        (which requires strings) and raised; parsing now happens only while
        the values are still strings, making the method idempotent.
        """
        if isinstance(self.start_time, str):
            self.start_time = parser.parse(self.start_time)
        if isinstance(self.end_time, str):
            self.end_time = parser.parse(self.end_time)
        return self.__dict__

    def to_json(self):
        """Serialise the instance to a JSON string (attributes as stored)."""
        return json.dumps(self, default=lambda student: student.__dict__)
4952864 | <reponame>jeokrohn/wxc_sdk
from .base import TestCaseWithLog
class TestLocation(TestCaseWithLog):
    """API smoke test for the locations endpoint."""

    def test_001_list_all(self):
        """
        list all locations
        """
        # list() drains the (possibly paginated) generator completely.
        location_list = list(self.api.locations.list())
        print(f'Got {len(location_list)} locations')
| StarcoderdataPython |
3345085 | #!/usr/bin/env python
##
## See COPYING file distributed along with the ncanda-data-integration package
## for the copyright and license terms
##
#
# List of fields to copy straight into REDCap
#
# Field names copied verbatim into REDCap from the SSAGA ASC export.
# The prefixes appear to denote instrument sections (AL, AS, OC, PN, DP,
# DM, MH, MJ, PT) -- confirm against the SSAGA codebook.
# NOTE(review): 'PN_ao8DK' and 'DM15' each appear twice in this list;
# likely harmless duplication, but verify it was intentional.
fields_to_copy = [ "AL1AgeOns", "AL1a",
'ASa_ao2DK', 'ASb_ao2DK', 'ASc1_ao6DK', 'ASc2_ao6DK', 'AS_ao9DK', 'AS_ao10DK', 'AS1_ao11DK', 'AS2_ao11DK', 'AS1_ao15DK', 'AS2_ao15DK', 'AS1_ao16DK',
'AS2_ao16DK', 'ASa1_ao14DK', 'ASa2_ao14DK', 'ASc1_ao14DK', 'ASc2_ao14DK', 'AS1_ao17DK', 'AS2_ao17DK', 'AS1_ao19DK', 'AS2_ao19DK', 'AS1_ao18DK',
'AS2_ao18DK', 'AS1_ao20DK', 'AS2_ao20DK',
'AS2a', 'ASa_ao2', 'AS2b', 'ASb_ao2', 'AS6b', 'ASc1_ao6', 'ASc2_ao6', 'AS9', 'AS_ao9',
'AS10a', 'AS_ao10', 'AS11', 'AS1_ao11', 'AS2_ao11', 'AS15', 'AS1_ao15', 'AS2_ao15', 'AS16', 'AS1_ao16', 'AS2_ao16',
'AS14', 'ASa1_ao14', 'ASa2_ao14', 'AS14b', 'ASc1_ao14', 'ASc2_ao14', 'AS17a', 'AS1_ao17', 'AS2_ao17',
'AS19', 'AS1_ao19', 'AS2_ao19', 'AS18b', 'AS1_ao18', 'AS2_ao18', 'AS20', 'AS1_ao20', 'AS2_ao20',
'OC1', 'OC_AO8', 'OC9', 'OC_ao16',
'PN1x', 'PN2a', 'PN2b', 'PN5', 'PN_ao8', 'PN_ao8DK', 'PN_ao8DK',
'DP4a', 'DP4b', 'DP3', 'DP3_1', 'DP11', 'DP12', 'DP15a', 'DP15b', 'DP15c', 'DP15d',
'DM8', 'DM8B', 'DM8_SPEC',
'DM15','DM8c_Spec','DM8c','DM8d','DM9','DM10','DM14','DM14a','DM14b','DM14c','DM15','DM15a',
'DM15b','DM15c','DM15d_1','DM15d_2','DM15d_3','DM15d_4','DM15d_5','DM15e','DM16','MH4a','MH4b',
'MH4c','MH5','Mh6a','Mh6a2','Mh6a3','Mh6a4','Mh6a5','Mh6a6','Mh6a7','MH6b','MH6b2','MH6b3',
'MH6b4','MH6b5','MH6b6','MH6b7','MH8','MH8a1','MH8a2','MH8a3','MH8a4','MH8a5','MH8a6','MH8a7',
'MH8a8','MH9','MH9a','MH9b','MH10','MH10a','AL5','AL5a','AL5b','AL6','AL6a','AL8','AL8a','AL8AgeOns',
'AL8d','AL10','AL11','AL12','AL12a','AL12c','AL13','AL13b','AL14','AL14b','AL15','AL15a','AL17',
'AL17b','AL19','AL21','AL21d','AL22','AL22b','AL23','AL23b','AL24','AL24b','AL25','AL25b','AL28',
'AL28b','MJ1a','MJ2AgeOns','MJ2C','MJ4','MJ5','MJ7','MJ7A','MJ7B','MJ8','MJ9','MJ12','MJ13','MJ14','MJ16','MJ17',
'PT1','PT1a1','PT1a1Age','PT1a2','PT1a2Age','PT1a3','PT1a3Age','PT1a4','PT1a4Age','PT1a5','PT1a5Age','PT1a6','PT1a6Age','PT1a7','PT1a7Age',
'PT1a8','PT1a8Age','PT1a9','PT1a9Age','PT1a10','PT1a10Age','PT1a11','PT1a11Age','PT1a12','PT1a12Age','PT1a13','PT1a13Age','PT1a14','PT1a14Age',
'PT1a15','PT1a15Age','PT1a16','PT1a16Age','PT1a17','PT1a17Age','PT1a18','PT1a18Age','PT1a19','PT1a19Age','PT1a19_Specify','PT1a20','PT1a20Age',
'PT1a20_Specify','PT1b','PT1bEVENT','PT3','PT4','PT5','PT6','PT7','PT8a','PT8b','PT9','PT10','PT11','PT11a','PT12','PT13','PT14','PT15',
'PT16a','PT16b','PT17','PT18','PT19','PT20','PT21','PT22a','PT22b','PT23','PT23a1','PT23a3','PT23b','PT23c','PT23d','PT23f','PT23g','PT23h',
'PT23i','PT23j','PT23Probe','PT24','PT24a','PT24b','PT25_NUM','PT25_UNIT','PT26_NUM','PT26_UNIT','PT27AgeRec','PT27Rec','PT27a','PT27FromAge1',
'PT27ToAge1','PT27FromAge2','PT27ToAge2','PT27b','PT28','PT28a']
#
# Table of field offsets as (fromIndex,toIndex) for ASC SSAGA data (Manipula's output)
#
field_offsets = {
"IND_ID" : ( 0, 11 ),
"INTV_DT" : ( 11, 19 ),
"IntvYr" : ( 19, 23 ),
"IntvMo" : ( 23, 25 ),
"Target" : ( 25, 26 ),
"DM1" : ( 26, 27 ),
"DM2_FT" : ( 27, 28 ),
"DM2_IN" : ( 28, 30 ),
"DM3" : ( 30, 33 ),
"DM3a" : ( 33, 36 ),
"DM3b" : ( 36, 38 ),
"DM5" : ( 38, 46 ),
"DM5a" : ( 46, 48 ),
"DM5b" : ( 48, 50 ),
"DM5c" : ( 50, 56 ),
"DM4" : ( 56, 59 ),
"DM4Decade" : ( 59, 61 ),
"AgeMinus1" : ( 61, 80 ),
"CURRDTE" : ( 80, 88 ),
"CurrYr" : ( 88, 107 ),
"CurrMnth" : ( 107, 109 ),
"BrthMnth" : ( 109, 111 ),
"BrthYr" : ( 111, 130 ),
"ConfirmAge" : ( 130, 131 ),
"DM6" : ( 131, 132 ),
"DM7" : ( 132, 133 ),
"DM8B" : ( 133, 134 ),
"DM8_SPEC" : ( 134, 214 ),
"DM8" : ( 214, 215 ),
"DM8I_1" : ( 215, 217 ),
"DM8II_1" : ( 217, 219 ),
"DM8I_2" : ( 219, 221 ),
"DM8II_2" : ( 221, 223 ),
"DM8I_3" : ( 223, 225 ),
"DM8II_3" : ( 225, 227 ),
"DM8I_4" : ( 227, 229 ),
"DM8II_4" : ( 229, 231 ),
"DM8c_Spec" : ( 231, 486 ),
"DM8c" : ( 486, 488 ),
"DM8c1" : ( 488, 489 ),
"DM8d" : ( 489, 492 ),
"DM9" : ( 492, 493 ),
"DM9_YR" : ( 493, 497 ),
"DM10" : ( 497, 498 ),
"DM10a" : ( 498, 499 ),
"DM11" : ( 499, 501 ),
"DM11A" : ( 501, 505 ),
"DM11A2" : ( 505, 509 ),
"DM11A3" : ( 509, 513 ),
"DM11A4" : ( 513, 517 ),
"DM11A5" : ( 517, 521 ),
"DM11A6" : ( 521, 525 ),
"DM11A7" : ( 525, 529 ),
"DM11A8" : ( 529, 533 ),
"DM11A9" : ( 533, 537 ),
"DM11A10" : ( 537, 541 ),
"DM12" : ( 541, 543 ),
"DM12A" : ( 543, 547 ),
"DM12A2" : ( 547, 551 ),
"DM12A3" : ( 551, 555 ),
"DM12A4" : ( 555, 559 ),
"DM12A5" : ( 559, 563 ),
"DM12A6" : ( 563, 567 ),
"DM12A7" : ( 567, 571 ),
"DM12A8" : ( 571, 575 ),
"DM12A9" : ( 575, 579 ),
"DM12A10" : ( 579, 583 ),
"DM13" : ( 583, 584 ),
"DM13a" : ( 584, 586 ),
"DM_AR13" : ( 586, 588 ),
"DMRec13" : ( 588, 589 ),
"DM14" : ( 589, 591 ),
"DM14a" : ( 591, 592 ),
"DM14b" : ( 592, 594 ),
"DM14c" : ( 594, 596 ),
"DM14SEX" : ( 596, 597 ),
"DM14_MO" : ( 597, 599 ),
"DM14_YR" : ( 599, 603 ),
"DM14SEX2" : ( 603, 604 ),
"DM14_MO2" : ( 604, 606 ),
"DM14_YR2" : ( 606, 610 ),
"DM14SEX3" : ( 610, 611 ),
"DM14_MO3" : ( 611, 613 ),
"DM14_YR3" : ( 613, 617 ),
"DM14SEX4" : ( 617, 618 ),
"DM14_MO4" : ( 618, 620 ),
"DM14_YR4" : ( 620, 624 ),
"DM14SEX5" : ( 624, 625 ),
"DM14_MO5" : ( 625, 627 ),
"DM14_YR5" : ( 627, 631 ),
"DM14SEX6" : ( 631, 632 ),
"DM14_MO6" : ( 632, 634 ),
"DM14_YR6" : ( 634, 638 ),
"DM14SEX7" : ( 638, 639 ),
"DM14_MO7" : ( 639, 641 ),
"DM14_YR7" : ( 641, 645 ),
"DM14SEX8" : ( 645, 646 ),
"DM14_MO8" : ( 646, 648 ),
"DM14_YR8" : ( 648, 652 ),
"DM14SEX9" : ( 652, 653 ),
"DM14_MO9" : ( 653, 655 ),
"DM14_YR9" : ( 655, 659 ),
"DM14SEX10" : ( 659, 660 ),
"DM14_MO10" : ( 660, 662 ),
"DM14_YR10" : ( 662, 666 ),
"DM14SEX11" : ( 666, 667 ),
"DM14_MO11" : ( 667, 669 ),
"DM14_YR11" : ( 669, 673 ),
"DM14SEX12" : ( 673, 674 ),
"DM14_MO12" : ( 674, 676 ),
"DM14_YR12" : ( 676, 680 ),
"DM14SEX13" : ( 680, 681 ),
"DM14_MO13" : ( 681, 683 ),
"DM14_YR13" : ( 683, 687 ),
"DM14SEX14" : ( 687, 688 ),
"DM14_MO14" : ( 688, 690 ),
"DM14_YR14" : ( 690, 694 ),
"DM14SEX15" : ( 694, 695 ),
"DM14_MO15" : ( 695, 697 ),
"DM14_YR15" : ( 697, 701 ),
"DM14SEX16" : ( 701, 702 ),
"DM14_MO16" : ( 702, 704 ),
"DM14_YR16" : ( 704, 708 ),
"DM14SEX17" : ( 708, 709 ),
"DM14_MO17" : ( 709, 711 ),
"DM14_YR17" : ( 711, 715 ),
"DM14SEX18" : ( 715, 716 ),
"DM14_MO18" : ( 716, 718 ),
"DM14_YR18" : ( 718, 722 ),
"DM14SEX19" : ( 722, 723 ),
"DM14_MO19" : ( 723, 725 ),
"DM14_YR19" : ( 725, 729 ),
"DM14SEX20" : ( 729, 730 ),
"DM14_MO20" : ( 730, 732 ),
"DM14_YR20" : ( 732, 736 ),
"DM14SEX21" : ( 736, 737 ),
"DM14_MO21" : ( 737, 739 ),
"DM14_YR21" : ( 739, 743 ),
"DM15" : ( 743, 745 ),
"DM15a" : ( 745, 746 ),
"DM15b" : ( 746, 747 ),
"DM15c" : ( 747, 748 ),
"DM15d_1" : ( 748, 752 ),
"DM15d_2" : ( 752, 756 ),
"DM15d_3" : ( 756, 760 ),
"DM15d_4" : ( 760, 764 ),
"DM15d_chk5" : ( 764, 765 ),
"DM15d_5" : ( 765, 769 ),
"DM15e" : ( 769, 770 ),
"DM16" : ( 770, 772 ),
"DM17" : ( 772, 773 ),
"DM17a" : ( 773, 774 ),
"DM17b" : ( 774, 776 ),
"DM18" : ( 776, 777 ),
"DM18a" : ( 777, 778 ),
"DM18b1_SPECIFY" : ( 778, 858 ),
"DM18b1" : ( 858, 877 ),
"DM18b2" : ( 877, 878 ),
"DM9_CNT" : ( 878, 879 ),
"MH1" : ( 879, 880 ),
"MH2" : ( 880, 881 ),
"MH2a" : ( 881, 1106 ),
"MH3_" : ( 1106, 1107 ),
"MH3YR" : ( 1107, 1111 ),
"MH3_2" : ( 1111, 1112 ),
"MH3YR2" : ( 1112, 1116 ),
"MH3_3" : ( 1116, 1117 ),
"MH3YR3" : ( 1117, 1121 ),
"MH3_4" : ( 1121, 1122 ),
"MH3YR4" : ( 1122, 1126 ),
"MH3_5" : ( 1126, 1127 ),
"MH3YR5" : ( 1127, 1131 ),
"MH3_6" : ( 1131, 1132 ),
"MH3YR6" : ( 1132, 1136 ),
"MH3_7" : ( 1136, 1137 ),
"MH3YR7" : ( 1137, 1141 ),
"MH3_8" : ( 1141, 1142 ),
"MH3YR8" : ( 1142, 1146 ),
"MH3_9" : ( 1146, 1147 ),
"MH3YR9" : ( 1147, 1151 ),
"MH3_10" : ( 1151, 1152 ),
"MH3YR10" : ( 1152, 1156 ),
"MH3_11" : ( 1156, 1157 ),
"MH3YR11" : ( 1157, 1161 ),
"MH3_12" : ( 1161, 1162 ),
"MH3YR12" : ( 1162, 1166 ),
"MH3_13" : ( 1166, 1167 ),
"MH3YR13" : ( 1167, 1171 ),
"MH3_14" : ( 1171, 1172 ),
"MH3YR14" : ( 1172, 1176 ),
"MH3_15" : ( 1176, 1177 ),
"MH3YR15" : ( 1177, 1181 ),
"MH3_16" : ( 1181, 1182 ),
"MH3YR16" : ( 1182, 1186 ),
"MH3_17" : ( 1186, 1187 ),
"MH3YR17" : ( 1187, 1191 ),
"MH3_13SPECIFY" : ( 1191, 1271 ),
"MH3_16SPECIFY" : ( 1271, 1351 ),
"MH3_17SPECIFY" : ( 1351, 1431 ),
"MH4a" : ( 1431, 1433 ),
"MH4b" : ( 1433, 1435 ),
"MH4c" : ( 1435, 1437 ),
"MH5" : ( 1437, 1439 ),
"Mh6a" : ( 1439, 1440 ),
"MH6aSpecify1" : ( 1440, 1520 ),
"MH6aCd1" : ( 1520, 1539 ),
"MH6aAnother1" : ( 1539, 1540 ),
"MH6aSpecify2" : ( 1540, 1620 ),
"MH6aCd2" : ( 1620, 1639 ),
"MH6aAnother2" : ( 1639, 1640 ),
"MH6aSpecify3" : ( 1640, 1720 ),
"MH6aCd3" : ( 1720, 1739 ),
"MH6aAnother3" : ( 1739, 1740 ),
"MH6aSpecify4" : ( 1740, 1820 ),
"MH6aCd4" : ( 1820, 1839 ),
"MH6aAnother4" : ( 1839, 1840 ),
"MH6aSpecify5" : ( 1840, 1920 ),
"MH6aCd5" : ( 1920, 1939 ),
"Mh6a2" : ( 1939, 1940 ),
"MH6aSpecify6" : ( 1940, 2020 ),
"MH6aCd6" : ( 2020, 2039 ),
"MH6aAnother5" : ( 2039, 2040 ),
"MH6aSpecify7" : ( 2040, 2120 ),
"MH6aCd7" : ( 2120, 2139 ),
"MH6aAnother6" : ( 2139, 2140 ),
"MH6aSpecify8" : ( 2140, 2220 ),
"MH6aCd8" : ( 2220, 2239 ),
"MH6aAnother7" : ( 2239, 2240 ),
"MH6aSpecify9" : ( 2240, 2320 ),
"MH6aCd9" : ( 2320, 2339 ),
"MH6aAnother8" : ( 2339, 2340 ),
"MH6aSpecify10" : ( 2340, 2420 ),
"MH6aCd10" : ( 2420, 2439 ),
"Mh6a3" : ( 2439, 2440 ),
"MH6aSpecify11" : ( 2440, 2520 ),
"MH6aCd11" : ( 2520, 2539 ),
"MH6aAnother9" : ( 2539, 2540 ),
"MH6aSpecify12" : ( 2540, 2620 ),
"MH6aCd12" : ( 2620, 2639 ),
"MH6aAnother10" : ( 2639, 2640 ),
"MH6aSpecify13" : ( 2640, 2720 ),
"MH6aCd13" : ( 2720, 2739 ),
"MH6aAnother11" : ( 2739, 2740 ),
"MH6aSpecify14" : ( 2740, 2820 ),
"MH6aCd14" : ( 2820, 2839 ),
"MH6aAnother12" : ( 2839, 2840 ),
"MH6aSpecify15" : ( 2840, 2920 ),
"MH6aCd15" : ( 2920, 2939 ),
"Mh6a4" : ( 2939, 2940 ),
"MH6aSpecify16" : ( 2940, 3020 ),
"MH6aCd16" : ( 3020, 3039 ),
"MH6aAnother13" : ( 3039, 3040 ),
"MH6aSpecify17" : ( 3040, 3120 ),
"MH6aCd17" : ( 3120, 3139 ),
"MH6aAnother14" : ( 3139, 3140 ),
"MH6aSpecify18" : ( 3140, 3220 ),
"MH6aCd18" : ( 3220, 3239 ),
"MH6aAnother15" : ( 3239, 3240 ),
"MH6aSpecify19" : ( 3240, 3320 ),
"MH6aCd19" : ( 3320, 3339 ),
"MH6aAnother16" : ( 3339, 3340 ),
"MH6aSpecify20" : ( 3340, 3420 ),
"MH6aCd20" : ( 3420, 3439 ),
"Mh6a5" : ( 3439, 3440 ),
"MH6aSpecify21" : ( 3440, 3520 ),
"MH6aCd21" : ( 3520, 3539 ),
"MH6aAnother17" : ( 3539, 3540 ),
"MH6aSpecify22" : ( 3540, 3620 ),
"MH6aCd22" : ( 3620, 3639 ),
"MH6aAnother18" : ( 3639, 3640 ),
"MH6aSpecify23" : ( 3640, 3720 ),
"MH6aCd23" : ( 3720, 3739 ),
"MH6aAnother19" : ( 3739, 3740 ),
"MH6aSpecify24" : ( 3740, 3820 ),
"MH6aCd24" : ( 3820, 3839 ),
"MH6aAnother20" : ( 3839, 3840 ),
"MH6aSpecify25" : ( 3840, 3920 ),
"MH6aCd25" : ( 3920, 3939 ),
"Mh6a6" : ( 3939, 3940 ),
"MH6aSpecify26" : ( 3940, 4020 ),
"MH6aCd26" : ( 4020, 4039 ),
"MH6aAnother21" : ( 4039, 4040 ),
"MH6aSpecify27" : ( 4040, 4120 ),
"MH6aCd27" : ( 4120, 4139 ),
"MH6aAnother22" : ( 4139, 4140 ),
"MH6aSpecify28" : ( 4140, 4220 ),
"MH6aCd28" : ( 4220, 4239 ),
"MH6aAnother23" : ( 4239, 4240 ),
"MH6aSpecify29" : ( 4240, 4320 ),
"MH6aCd29" : ( 4320, 4339 ),
"MH6aAnother24" : ( 4339, 4340 ),
"MH6aSpecify30" : ( 4340, 4420 ),
"MH6aCd30" : ( 4420, 4439 ),
"Mh6a7" : ( 4439, 4440 ),
"MH6aSpecify31" : ( 4440, 4520 ),
"MH6aCd31" : ( 4520, 4539 ),
"MH6aAnother25" : ( 4539, 4540 ),
"MH6aSpecify32" : ( 4540, 4620 ),
"MH6aCd32" : ( 4620, 4639 ),
"MH6aAnother26" : ( 4639, 4640 ),
"MH6aSpecify33" : ( 4640, 4720 ),
"MH6aCd33" : ( 4720, 4739 ),
"MH6aAnother27" : ( 4739, 4740 ),
"MH6aSpecify34" : ( 4740, 4820 ),
"MH6aCd34" : ( 4820, 4839 ),
"MH6aAnother28" : ( 4839, 4840 ),
"MH6aSpecify35" : ( 4840, 4920 ),
"MH6aCd35" : ( 4920, 4939 ),
"MH6b" : ( 4939, 4940 ),
"MH6bSpecify1" : ( 4940, 5020 ),
"MH6bCd1" : ( 5020, 5039 ),
"MH6bAnother1" : ( 5039, 5040 ),
"MH6bSpecify2" : ( 5040, 5120 ),
"MH6bCd2" : ( 5120, 5139 ),
"MH6bAnother2" : ( 5139, 5140 ),
"MH6bSpecify3" : ( 5140, 5220 ),
"MH6bCd3" : ( 5220, 5239 ),
"MH6bAnother3" : ( 5239, 5240 ),
"MH6bSpecify4" : ( 5240, 5320 ),
"MH6bCd4" : ( 5320, 5339 ),
"MH6bAnother4" : ( 5339, 5340 ),
"MH6bSpecify5" : ( 5340, 5420 ),
"MH6bCd5" : ( 5420, 5439 ),
"MH6b2" : ( 5439, 5440 ),
"MH6bSpecify6" : ( 5440, 5520 ),
"MH6bCd6" : ( 5520, 5539 ),
"MH6bAnother5" : ( 5539, 5540 ),
"MH6bSpecify7" : ( 5540, 5620 ),
"MH6bCd7" : ( 5620, 5639 ),
"MH6bAnother6" : ( 5639, 5640 ),
"MH6bSpecify8" : ( 5640, 5720 ),
"MH6bCd8" : ( 5720, 5739 ),
"MH6bAnother7" : ( 5739, 5740 ),
"MH6bSpecify9" : ( 5740, 5820 ),
"MH6bCd9" : ( 5820, 5839 ),
"MH6bAnother8" : ( 5839, 5840 ),
"MH6bSpecify10" : ( 5840, 5920 ),
"MH6bCd10" : ( 5920, 5939 ),
"MH6b3" : ( 5939, 5940 ),
"MH6bSpecify11" : ( 5940, 6020 ),
"MH6bCd11" : ( 6020, 6039 ),
"MH6bAnother9" : ( 6039, 6040 ),
"MH6bSpecify12" : ( 6040, 6120 ),
"MH6bCd12" : ( 6120, 6139 ),
"MH6bAnother10" : ( 6139, 6140 ),
"MH6bSpecify13" : ( 6140, 6220 ),
"MH6bCd13" : ( 6220, 6239 ),
"MH6bAnother11" : ( 6239, 6240 ),
"MH6bSpecify14" : ( 6240, 6320 ),
"MH6bCd14" : ( 6320, 6339 ),
"MH6bAnother12" : ( 6339, 6340 ),
"MH6bSpecify15" : ( 6340, 6420 ),
"MH6bCd15" : ( 6420, 6439 ),
"MH6b4" : ( 6439, 6440 ),
"MH6bSpecify16" : ( 6440, 6520 ),
"MH6bCd16" : ( 6520, 6539 ),
"MH6bAnother13" : ( 6539, 6540 ),
"MH6bSpecify17" : ( 6540, 6620 ),
"MH6bCd17" : ( 6620, 6639 ),
"MH6bAnother14" : ( 6639, 6640 ),
"MH6bSpecify18" : ( 6640, 6720 ),
"MH6bCd18" : ( 6720, 6739 ),
"MH6bAnother15" : ( 6739, 6740 ),
"MH6bSpecify19" : ( 6740, 6820 ),
"MH6bCd19" : ( 6820, 6839 ),
"MH6bAnother16" : ( 6839, 6840 ),
"MH6bSpecify20" : ( 6840, 6920 ),
"MH6bCd20" : ( 6920, 6939 ),
"MH6b5" : ( 6939, 6940 ),
"MH6bSpecify21" : ( 6940, 7020 ),
"MH6bCd21" : ( 7020, 7039 ),
"MH6bAnother17" : ( 7039, 7040 ),
"MH6bSpecify22" : ( 7040, 7120 ),
"MH6bCd22" : ( 7120, 7139 ),
"MH6bAnother18" : ( 7139, 7140 ),
"MH6bSpecify23" : ( 7140, 7220 ),
"MH6bCd23" : ( 7220, 7239 ),
"MH6bAnother19" : ( 7239, 7240 ),
"MH6bSpecify24" : ( 7240, 7320 ),
"MH6bCd24" : ( 7320, 7339 ),
"MH6bAnother20" : ( 7339, 7340 ),
"MH6bSpecify25" : ( 7340, 7420 ),
"MH6bCd25" : ( 7420, 7439 ),
"MH6b6" : ( 7439, 7440 ),
"MH6bSpecify26" : ( 7440, 7520 ),
"MH6bCd26" : ( 7520, 7539 ),
"MH6bAnother21" : ( 7539, 7540 ),
"MH6bSpecify27" : ( 7540, 7620 ),
"MH6bCd27" : ( 7620, 7639 ),
"MH6bAnother22" : ( 7639, 7640 ),
"MH6bSpecify28" : ( 7640, 7720 ),
"MH6bCd28" : ( 7720, 7739 ),
"MH6bAnother23" : ( 7739, 7740 ),
"MH6bSpecify29" : ( 7740, 7820 ),
"MH6bCd29" : ( 7820, 7839 ),
"MH6bAnother24" : ( 7839, 7840 ),
"MH6bSpecify30" : ( 7840, 7920 ),
"MH6bCd30" : ( 7920, 7939 ),
"MH6b7" : ( 7939, 7940 ),
"MH6bSpecify31" : ( 7940, 8020 ),
"MH6bCd31" : ( 8020, 8039 ),
"MH6bAnother25" : ( 8039, 8040 ),
"MH6bSpecify32" : ( 8040, 8120 ),
"MH6bCd32" : ( 8120, 8139 ),
"MH6bAnother26" : ( 8139, 8140 ),
"MH6bSpecify33" : ( 8140, 8220 ),
"MH6bCd33" : ( 8220, 8239 ),
"MH6bAnother27" : ( 8239, 8240 ),
"MH6bSpecify34" : ( 8240, 8320 ),
"MH6bCd34" : ( 8320, 8339 ),
"MH6bAnother28" : ( 8339, 8340 ),
"MH6bSpecify35" : ( 8340, 8420 ),
"MH6bCd35" : ( 8420, 8439 ),
"MH6b8" : ( 8439, 8440 ),
"MH6bSpecify36" : ( 8440, 8520 ),
"MH6bCd36" : ( 8520, 8539 ),
"MH6bAnother29" : ( 8539, 8540 ),
"MH6bSpecify37" : ( 8540, 8620 ),
"MH6bCd37" : ( 8620, 8639 ),
"MH6bAnother30" : ( 8639, 8640 ),
"MH6bSpecify38" : ( 8640, 8720 ),
"MH6bCd38" : ( 8720, 8739 ),
"MH6bAnother31" : ( 8739, 8740 ),
"MH6bSpecify39" : ( 8740, 8820 ),
"MH6bCd39" : ( 8820, 8839 ),
"MH6bAnother32" : ( 8839, 8840 ),
"MH6bSpecify40" : ( 8840, 8920 ),
"MH6bCd40" : ( 8920, 8939 ),
"MH8" : ( 8939, 8940 ),
"MH8a1" : ( 8940, 8941 ),
"MH8a2" : ( 8941, 8942 ),
"MH8a3" : ( 8942, 8943 ),
"MH8a4" : ( 8943, 8944 ),
"MH8a5" : ( 8944, 8945 ),
"MH8a6" : ( 8945, 8946 ),
"MH8a7" : ( 8946, 8947 ),
"MH8a8" : ( 8947, 8948 ),
"MH8a8SPecify" : ( 8948, 9028 ),
"MH9" : ( 9028, 9030 ),
"MH9a" : ( 9030, 9032 ),
"MH9b" : ( 9032, 9034 ),
"MH10" : ( 9034, 9035 ),
"MH10a" : ( 9035, 9036 ),
"TB1c1" : ( 9036, 9037 ),
"TB1c_ao1" : ( 9037, 9039 ),
"TB1c_o1" : ( 9039, 9040 ),
"TB1c_ar1" : ( 9040, 9042 ),
"TB1c_r1" : ( 9042, 9043 ),
"TB1c2" : ( 9043, 9044 ),
"TB1c_ao2" : ( 9044, 9046 ),
"TB1c_o2" : ( 9046, 9047 ),
"TB1c_ar2" : ( 9047, 9049 ),
"TB1c_r2" : ( 9049, 9050 ),
"TB1c3" : ( 9050, 9051 ),
"TB1c_ao3" : ( 9051, 9053 ),
"TB1c_o3" : ( 9053, 9054 ),
"TB1c_ar3" : ( 9054, 9056 ),
"TB1c_r3" : ( 9056, 9057 ),
"TB1c4" : ( 9057, 9058 ),
"TB1c_ao4" : ( 9058, 9060 ),
"TB1c_o4" : ( 9060, 9061 ),
"TB1c_ar4" : ( 9061, 9063 ),
"TB1c_r4" : ( 9063, 9064 ),
"TB1d" : ( 9064, 9065 ),
"TBd_ao1" : ( 9065, 9067 ),
"TBd_o1" : ( 9067, 9068 ),
"TB3" : ( 9068, 9069 ),
"TB3_1" : ( 9069, 9070 ),
"TB3a" : ( 9070, 9073 ),
"TB4a" : ( 9073, 9074 ),
"TB4a1" : ( 9074, 9075 ),
"TB4b" : ( 9075, 9078 ),
"TB4b1" : ( 9078, 9079 ),
"TB4cNum" : ( 9079, 9081 ),
"TB4cUnit" : ( 9081, 9082 ),
"TB_ao4" : ( 9082, 9085 ),
"TB_o4" : ( 9085, 9086 ),
"TB_ar4" : ( 9086, 9089 ),
"TB_r4" : ( 9089, 9090 ),
"TB5x" : ( 9090, 9092 ),
"TB5xDK" : ( 9092, 9093 ),
"TB5" : ( 9093, 9096 ),
"TB5a" : ( 9096, 9097 ),
"TB6" : ( 9097, 9098 ),
"TB7" : ( 9098, 9099 ),
"TB8" : ( 9099, 9100 ),
"TB9" : ( 9100, 9101 ),
"TB10" : ( 9101, 9102 ),
"TB10a" : ( 9102, 9104 ),
"TB10a1" : ( 9104, 9105 ),
"TB10b" : ( 9105, 9107 ),
"TB10b1" : ( 9107, 9108 ),
"TB10cNum" : ( 9108, 9110 ),
"TB10cUNIT" : ( 9110, 9111 ),
"TB10c1" : ( 9111, 9112 ),
"TB11" : ( 9112, 9113 ),
"TB12" : ( 9113, 9114 ),
"TB12a" : ( 9114, 9115 ),
"TB13" : ( 9115, 9116 ),
"TB13A" : ( 9116, 9117 ),
"TB13B" : ( 9117, 9118 ),
"TB14" : ( 9118, 9119 ),
"TB14a" : ( 9119, 9120 ),
"TB14b" : ( 9120, 9123 ),
"TB14b1" : ( 9123, 9124 ),
"TB14C" : ( 9124, 9125 ),
"TB14c1" : ( 9125, 9126 ),
"TB14d" : ( 9126, 9127 ),
"TB15NUM" : ( 9127, 9129 ),
"TB15UNIT" : ( 9129, 9130 ),
"TB15a" : ( 9130, 9131 ),
"TB15b" : ( 9131, 9132 ),
"TB15c" : ( 9132, 9133 ),
"TB15d" : ( 9133, 9134 ),
"TB15Specify" : ( 9134, 9359 ),
"TB_ao15" : ( 9359, 9361 ),
"TB_o15" : ( 9361, 9362 ),
"TB_ar15" : ( 9362, 9364 ),
"TB_r15" : ( 9364, 9365 ),
"TB16_1" : ( 9365, 9366 ),
"TB16_2" : ( 9366, 9367 ),
"TB16_3" : ( 9367, 9368 ),
"TB16_4" : ( 9368, 9369 ),
"TB16_5" : ( 9369, 9370 ),
"TB16_6" : ( 9370, 9371 ),
"TB16_7" : ( 9371, 9372 ),
"TB16_8" : ( 9372, 9373 ),
"TB16_9" : ( 9373, 9374 ),
"TB16c" : ( 9374, 9375 ),
"TB16d" : ( 9375, 9376 ),
"TB17" : ( 9376, 9377 ),
"TB17a" : ( 9377, 9378 ),
"TB17Specify" : ( 9378, 9603 ),
"TB17b" : ( 9603, 9604 ),
"TB18" : ( 9604, 9605 ),
"TB18SPecify" : ( 9605, 9685 ),
"TB18Code" : ( 9685, 9704 ),
"TB18a" : ( 9704, 9705 ),
"TB19" : ( 9705, 9706 ),
"TB19Specify" : ( 9706, 9786 ),
"TB19Code" : ( 9786, 9805 ),
"TB20a" : ( 9805, 9806 ),
"TB20b" : ( 9806, 9807 ),
"TB20c" : ( 9807, 9808 ),
"TB20d" : ( 9808, 9809 ),
"TBqSx" : ( 9809, 9810 ),
"TBqSx2" : ( 9810, 9811 ),
"TBqSx3" : ( 9811, 9812 ),
"TBqSx4" : ( 9812, 9813 ),
"TBqSx5" : ( 9813, 9814 ),
"TBqSx6" : ( 9814, 9815 ),
"TBqSx7" : ( 9815, 9816 ),
"TBqSx8" : ( 9816, 9817 ),
"TBqSx9" : ( 9817, 9818 ),
"TBqSx10" : ( 9818, 9819 ),
"TBqSx11" : ( 9819, 9820 ),
"TBqSx12" : ( 9820, 9821 ),
"TBqSx13" : ( 9821, 9822 ),
"TBqSx14" : ( 9822, 9823 ),
"TBYrCl" : ( 9823, 9825 ),
"TB_ao21" : ( 9825, 9827 ),
"TB_ar21" : ( 9827, 9829 ),
"TBCrit1" : ( 9829, 9848 ),
"TBCrit2" : ( 9848, 9867 ),
"TBCrit3" : ( 9867, 9886 ),
"TBCrit4" : ( 9886, 9905 ),
"TBCrit5" : ( 9905, 9924 ),
"TBCrit6" : ( 9924, 9943 ),
"TBCrit7" : ( 9943, 9962 ),
"TBSxNum01" : ( 9962, 9981 ),
"TBSxNum02" : ( 9981, 10000 ),
"TBSxNum03" : ( 10000, 10019 ),
"TBSxNum04" : ( 10019, 10038 ),
"TBSxNum05" : ( 10038, 10057 ),
"TBSxNum06" : ( 10057, 10076 ),
"TBSxNum07" : ( 10076, 10095 ),
"TBSxNum08" : ( 10095, 10114 ),
"TBSxNum09" : ( 10114, 10133 ),
"TBSxNum10" : ( 10133, 10152 ),
"TBSxNum11" : ( 10152, 10171 ),
"TBSxNum12" : ( 10171, 10190 ),
"TBSxNum13" : ( 10190, 10209 ),
"TBSxNum14" : ( 10209, 10228 ),
"TBSxList01" : ( 10228, 10483 ),
"TBSxList02" : ( 10483, 10738 ),
"TBSxList03" : ( 10738, 10993 ),
"TBSxList04" : ( 10993, 11248 ),
"TBSxList05" : ( 11248, 11503 ),
"TBSxList06" : ( 11503, 11758 ),
"TBSxList07" : ( 11758, 12013 ),
"TBSxList08" : ( 12013, 12268 ),
"TBSxList09" : ( 12268, 12523 ),
"TBSxList10" : ( 12523, 12778 ),
"TBSxList11" : ( 12778, 13033 ),
"TBSxList12" : ( 13033, 13288 ),
"TBSxList13" : ( 13288, 13543 ),
"TBSxList14" : ( 13543, 13798 ),
"TBSxPastList01" : ( 13798, 14053 ),
"TBSxPastList02" : ( 14053, 14308 ),
"TBSxPastList03" : ( 14308, 14563 ),
"TBSxPastList04" : ( 14563, 14818 ),
"TBSxPastList05" : ( 14818, 15073 ),
"TBSxPastList06" : ( 15073, 15328 ),
"TBSxPastList07" : ( 15328, 15583 ),
"TBSxPastList08" : ( 15583, 15838 ),
"TBSxPastList09" : ( 15838, 16093 ),
"TBSxPastList10" : ( 16093, 16348 ),
"TBSxPastList11" : ( 16348, 16603 ),
"TBSxPastList12" : ( 16603, 16858 ),
"TBSxPastList13" : ( 16858, 17113 ),
"TBSxPastList14" : ( 17113, 17368 ),
"Varname01" : ( 17368, 17623 ),
"Varname02" : ( 17623, 17878 ),
"Varname03" : ( 17878, 18133 ),
"Varname04" : ( 18133, 18388 ),
"Varname05" : ( 18388, 18643 ),
"Varname06" : ( 18643, 18898 ),
"Varname07" : ( 18898, 19153 ),
"Varname08" : ( 19153, 19408 ),
"Varname09" : ( 19408, 19663 ),
"Varname10" : ( 19663, 19918 ),
"Varname11" : ( 19918, 20173 ),
"Varname12" : ( 20173, 20428 ),
"Varname13" : ( 20428, 20683 ),
"Varname14" : ( 20683, 20938 ),
"TB4bCl" : ( 20938, 20939 ),
"TB10Cl" : ( 20939, 20940 ),
"TB11Cl" : ( 20940, 20941 ),
"TB12Cl" : ( 20941, 20942 ),
"TB12aCl" : ( 20942, 20943 ),
"TB14Cl" : ( 20943, 20944 ),
"TB14dCl" : ( 20944, 20945 ),
"TB16Sx1" : ( 20945, 21200 ),
"TB16Sx2" : ( 21200, 21455 ),
"TB16Sx3" : ( 21455, 21710 ),
"TB16Sx4" : ( 21710, 21965 ),
"TB16Sx5" : ( 21965, 22220 ),
"TB16Sx6" : ( 22220, 22475 ),
"TB16Sx7" : ( 22475, 22730 ),
"TB16Sx8" : ( 22730, 22985 ),
"TB16Sx9" : ( 22985, 23240 ),
"TB16aCl" : ( 23240, 23241 ),
"TB16dCl" : ( 23241, 23242 ),
"TB17bCl" : ( 23242, 23243 ),
"TB18aCl" : ( 23243, 23244 ),
"TB19Cl" : ( 23244, 23245 ),
"TB20cCl" : ( 23245, 23246 ),
"TB20dCl" : ( 23246, 23247 ),
"RecYrIntrvl" : ( 23247, 23266 ),
"MonthsElapsed" : ( 23266, 23285 ),
"Past6Mos" : ( 23285, 23286 ),
"Past12Mos" : ( 23286, 23287 ),
"AL1AgeRec" : ( 23287, 23306 ),
"AL1" : ( 23306, 23307 ),
"AL1a" : ( 23307, 23308 ),
"AL1AgeOns" : ( 23308, 23310 ),
"AL1Ons" : ( 23310, 23311 ),
"AL2a1" : ( 23311, 23312 ),
"AL2a1a" : ( 23312, 23313 ),
"AL2a2" : ( 23313, 23314 ),
"AL2a3" : ( 23314, 23315 ),
"AL2a4" : ( 23315, 23316 ),
"AL2a5" : ( 23316, 23317 ),
"AL2a6" : ( 23317, 23318 ),
"AL2b1" : ( 23318, 23319 ),
"AL2b2" : ( 23319, 23320 ),
"AL2b3" : ( 23320, 23321 ),
"AL2b4" : ( 23321, 23322 ),
"AL2b5" : ( 23322, 23323 ),
"AL2b6" : ( 23323, 23324 ),
"NOW" : ( 23324, 23332 ),
"DayToday" : ( 23332, 23333 ),
"AL3" : ( 23333, 23334 ),
"BEER" : ( 23334, 23336 ),
"WINE" : ( 23336, 23338 ),
"LIQUOR" : ( 23338, 23340 ),
"OTHER" : ( 23340, 23342 ),
"SPECIFY" : ( 23342, 23372 ),
"BEER2" : ( 23372, 23374 ),
"WINE2" : ( 23374, 23376 ),
"LIQUOR2" : ( 23376, 23378 ),
"OTHER2" : ( 23378, 23380 ),
"SPECIFY2" : ( 23380, 23410 ),
"BEER3" : ( 23410, 23412 ),
"WINE3" : ( 23412, 23414 ),
"LIQUOR3" : ( 23414, 23416 ),
"OTHER3" : ( 23416, 23418 ),
"SPECIFY3" : ( 23418, 23448 ),
"BEER4" : ( 23448, 23450 ),
"WINE4" : ( 23450, 23452 ),
"LIQUOR4" : ( 23452, 23454 ),
"OTHER4" : ( 23454, 23456 ),
"SPECIFY4" : ( 23456, 23486 ),
"BEER5" : ( 23486, 23488 ),
"WINE5" : ( 23488, 23490 ),
"LIQUOR5" : ( 23490, 23492 ),
"OTHER5" : ( 23492, 23494 ),
"SPECIFY5" : ( 23494, 23524 ),
"BEER6" : ( 23524, 23526 ),
"WINE6" : ( 23526, 23528 ),
"LIQUOR6" : ( 23528, 23530 ),
"OTHER6" : ( 23530, 23532 ),
"SPECIFY6" : ( 23532, 23562 ),
"BEER7" : ( 23562, 23564 ),
"WINE7" : ( 23564, 23566 ),
"LIQUOR7" : ( 23566, 23568 ),
"OTHER7" : ( 23568, 23570 ),
"SPECIFY7" : ( 23570, 23600 ),
"DrinkSum31" : ( 23600, 23619 ),
"DrinkSum32" : ( 23619, 23638 ),
"DrinkSum33" : ( 23638, 23657 ),
"DrinkSum34" : ( 23657, 23676 ),
"DrinkSum35" : ( 23676, 23695 ),
"DrinkSum36" : ( 23695, 23714 ),
"DrinkSum37" : ( 23714, 23733 ),
"AL3c_mo" : ( 23733, 23735 ),
"AL3c_yr" : ( 23735, 23739 ),
"AL3AgeRec" : ( 23739, 23741 ),
"AL3Rec" : ( 23741, 23742 ),
"AL3c1" : ( 23742, 23743 ),
"AL3d" : ( 23743, 23744 ),
"AL4a" : ( 23744, 23746 ),
"BEER8" : ( 23746, 23748 ),
"WINE8" : ( 23748, 23750 ),
"LIQUOR8" : ( 23750, 23752 ),
"OTHER8" : ( 23752, 23754 ),
"SPECIFY8" : ( 23754, 23784 ),
"BEER9" : ( 23784, 23786 ),
"WINE9" : ( 23786, 23788 ),
"LIQUOR9" : ( 23788, 23790 ),
"OTHER9" : ( 23790, 23792 ),
"SPECIFY9" : ( 23792, 23822 ),
"BEER10" : ( 23822, 23824 ),
"WINE10" : ( 23824, 23826 ),
"LIQUOR10" : ( 23826, 23828 ),
"OTHER10" : ( 23828, 23830 ),
"SPECIFY10" : ( 23830, 23860 ),
"BEER11" : ( 23860, 23862 ),
"WINE11" : ( 23862, 23864 ),
"LIQUOR11" : ( 23864, 23866 ),
"OTHER11" : ( 23866, 23868 ),
"SPECIFY11" : ( 23868, 23898 ),
"BEER12" : ( 23898, 23900 ),
"WINE12" : ( 23900, 23902 ),
"LIQUOR12" : ( 23902, 23904 ),
"OTHER12" : ( 23904, 23906 ),
"SPECIFY12" : ( 23906, 23936 ),
"BEER13" : ( 23936, 23938 ),
"WINE13" : ( 23938, 23940 ),
"LIQUOR13" : ( 23940, 23942 ),
"OTHER13" : ( 23942, 23944 ),
"SPECIFY13" : ( 23944, 23974 ),
"BEER14" : ( 23974, 23976 ),
"WINE14" : ( 23976, 23978 ),
"LIQUOR14" : ( 23978, 23980 ),
"OTHER14" : ( 23980, 23982 ),
"SPECIFY14" : ( 23982, 24012 ),
"DrinkSum41" : ( 24012, 24031 ),
"DrinkSum42" : ( 24031, 24050 ),
"DrinkSum43" : ( 24050, 24069 ),
"DrinkSum44" : ( 24069, 24088 ),
"DrinkSum45" : ( 24088, 24107 ),
"DrinkSum46" : ( 24107, 24126 ),
"DrinkSum47" : ( 24126, 24145 ),
"AL4c" : ( 24145, 24146 ),
"AL4d" : ( 24146, 24149 ),
"AL4e1" : ( 24149, 24151 ),
"AL4e2" : ( 24151, 24153 ),
"AL4e3" : ( 24153, 24155 ),
"AL4fx" : ( 24155, 24156 ),
"AL4f1" : ( 24156, 24158 ),
"AL4f2" : ( 24158, 24160 ),
"AL4f3" : ( 24160, 24162 ),
"AL4AgeOns" : ( 24162, 24164 ),
"AL4AgeRec" : ( 24164, 24166 ),
"AL4Rec" : ( 24166, 24167 ),
"AL5" : ( 24167, 24169 ),
"AL5a" : ( 24169, 24171 ),
"AL5a1" : ( 24171, 24172 ),
"AL5b" : ( 24172, 24173 ),
"AL5c" : ( 24173, 24174 ),
"AL6" : ( 24174, 24177 ),
"AL6_1" : ( 24177, 24178 ),
"AL6a" : ( 24178, 24181 ),
"AL8" : ( 24181, 24182 ),
"AL8a" : ( 24182, 24184 ),
"AL8b" : ( 24184, 24185 ),
"AL8AgeOns" : ( 24185, 24187 ),
"AL8Ons" : ( 24187, 24188 ),
"AL8d" : ( 24188, 24192 ),
"AL9" : ( 24192, 24193 ),
"AL9a1" : ( 24193, 24196 ),
"AL9a2" : ( 24196, 24199 ),
"AL9bAgeOns" : ( 24199, 24201 ),
"AL9bOns" : ( 24201, 24202 ),
"AL9bAgeRec" : ( 24202, 24204 ),
"AL9bRec" : ( 24204, 24205 ),
"AL9c" : ( 24205, 24206 ),
"AL9d" : ( 24206, 24207 ),
"AL9e" : ( 24207, 24208 ),
"AL9f1" : ( 24208, 24211 ),
"AL9f2" : ( 24211, 24214 ),
"AL9gAgeOns" : ( 24214, 24216 ),
"AL9gOns" : ( 24216, 24217 ),
"AL9gAgeRec" : ( 24217, 24219 ),
"AL9gRec" : ( 24219, 24220 ),
"AL9h" : ( 24220, 24221 ),
"AL9i" : ( 24221, 24222 ),
"AL10" : ( 24222, 24223 ),
"AL10aAgeOns" : ( 24223, 24225 ),
"AL10aOns" : ( 24225, 24226 ),
"AL10aAgeRec" : ( 24226, 24228 ),
"AL10aRec" : ( 24228, 24229 ),
"AL10b" : ( 24229, 24230 ),
"AL10c" : ( 24230, 24231 ),
"AL10d" : ( 24231, 24234 ),
"AL10d1" : ( 24234, 24235 ),
"AL10eAgeOns" : ( 24235, 24237 ),
"AL10eOns" : ( 24237, 24238 ),
"AL10eAgeRec" : ( 24238, 24240 ),
"AL10eRec" : ( 24240, 24241 ),
"AL11" : ( 24241, 24242 ),
"AL11AgeOns" : ( 24242, 24244 ),
"AL11Ons" : ( 24244, 24245 ),
"AL11AgeRec" : ( 24245, 24247 ),
"AL11Rec" : ( 24247, 24248 ),
"AL12" : ( 24248, 24249 ),
"AL12a" : ( 24249, 24250 ),
"AL12AgeOns" : ( 24250, 24252 ),
"AL12Ons" : ( 24252, 24253 ),
"AL12AgeRec" : ( 24253, 24255 ),
"AL12Rec" : ( 24255, 24256 ),
"AL12c" : ( 24256, 24257 ),
"AL13" : ( 24257, 24258 ),
"AL13AgeOns" : ( 24258, 24260 ),
"AL13Ons" : ( 24260, 24261 ),
"AL13AgeRec" : ( 24261, 24263 ),
"AL13Rec" : ( 24263, 24264 ),
"AL13b" : ( 24264, 24265 ),
"AL14" : ( 24265, 24266 ),
"AL14AgeOns" : ( 24266, 24268 ),
"AL14Ons" : ( 24268, 24269 ),
"AL14AgeRec" : ( 24269, 24271 ),
"AL14Rec" : ( 24271, 24272 ),
"AL14b" : ( 24272, 24273 ),
"AL15" : ( 24273, 24274 ),
"AL15a" : ( 24274, 24275 ),
"AL15AgeOns" : ( 24275, 24277 ),
"AL15Ons" : ( 24277, 24278 ),
"AL15AgeRec" : ( 24278, 24280 ),
"AL15Rec" : ( 24280, 24281 ),
"AL16" : ( 24281, 24282 ),
"AL16a" : ( 24282, 24283 ),
"AL16b" : ( 24283, 24286 ),
"AL16b1" : ( 24286, 24287 ),
"AL16AgeOns" : ( 24287, 24289 ),
"AL16Ons" : ( 24289, 24290 ),
"AL16AgeRec" : ( 24290, 24292 ),
"AL16Rec" : ( 24292, 24293 ),
"AL16d" : ( 24293, 24294 ),
"AL17" : ( 24294, 24295 ),
"AL17AgeOns" : ( 24295, 24297 ),
"AL17Ons" : ( 24297, 24298 ),
"AL17AgeRec" : ( 24298, 24300 ),
"AL17Rec" : ( 24300, 24301 ),
"AL17b" : ( 24301, 24304 ),
"AL17b1" : ( 24304, 24305 ),
"AL18" : ( 24305, 24306 ),
"AL18a" : ( 24306, 24307 ),
"AL18AgeOns" : ( 24307, 24309 ),
"AL18Ons" : ( 24309, 24310 ),
"AL18AgeRec" : ( 24310, 24312 ),
"AL18Rec" : ( 24312, 24313 ),
"AL18c" : ( 24313, 24314 ),
"AL19" : ( 24314, 24315 ),
"AL19AgeOns" : ( 24315, 24317 ),
"AL19Ons" : ( 24317, 24318 ),
"AL19AgeRec" : ( 24318, 24320 ),
"AL19Rec" : ( 24320, 24321 ),
"AL20" : ( 24321, 24322 ),
"AL20AgeOns" : ( 24322, 24324 ),
"AL20Ons" : ( 24324, 24325 ),
"AL21" : ( 24325, 24326 ),
"AL21Another" : ( 24326, 24327 ),
"AL21a_Specify" : ( 24327, 24407 ),
"AL21a_Code" : ( 24407, 24426 ),
"AL21Another2" : ( 24426, 24427 ),
"AL21a_Specify2" : ( 24427, 24507 ),
"AL21a_Code2" : ( 24507, 24526 ),
"AL21Another3" : ( 24526, 24527 ),
"AL21a_Specify3" : ( 24527, 24607 ),
"AL21a_Code3" : ( 24607, 24626 ),
"AL21Another4" : ( 24626, 24627 ),
"AL21a_Specify4" : ( 24627, 24707 ),
"AL21a_Code4" : ( 24707, 24726 ),
"AL21_SPECIFY" : ( 24726, 24951 ),
"AL21AgeOns" : ( 24951, 24953 ),
"AL21Ons" : ( 24953, 24954 ),
"AL21AgeRec" : ( 24954, 24956 ),
"AL21Rec" : ( 24956, 24957 ),
"AL21c" : ( 24957, 24958 ),
"AL21d" : ( 24958, 24959 ),
"AL21d_SPECIFY" : ( 24959, 25184 ),
"AL22" : ( 25184, 25185 ),
"AL22AgeOns" : ( 25185, 25187 ),
"AL22Ons" : ( 25187, 25188 ),
"AL22AgeRec" : ( 25188, 25190 ),
"AL22Rec" : ( 25190, 25191 ),
"AL22b" : ( 25191, 25193 ),
"AL22b1" : ( 25193, 25194 ),
"AL22c" : ( 25194, 25195 ),
"AL23" : ( 25195, 25196 ),
"AL23b" : ( 25196, 25198 ),
"AL23AgeOns" : ( 25198, 25200 ),
"AL23Ons" : ( 25200, 25201 ),
"AL23AgeRec" : ( 25201, 25203 ),
"AL23Rec" : ( 25203, 25204 ),
"AL23b1" : ( 25204, 25205 ),
"AL23c" : ( 25205, 25206 ),
"AL24" : ( 25206, 25207 ),
"AL24b" : ( 25207, 25209 ),
"AL24AgeOns" : ( 25209, 25211 ),
"AL24Ons" : ( 25211, 25212 ),
"AL24AgeRec" : ( 25212, 25214 ),
"AL24Rec" : ( 25214, 25215 ),
"AL24b1" : ( 25215, 25216 ),
"AL24c" : ( 25216, 25217 ),
"AL25" : ( 25217, 25218 ),
"AL25AgeOns" : ( 25218, 25220 ),
"AL25Ons" : ( 25220, 25221 ),
"AL25AgeRec" : ( 25221, 25223 ),
"AL25Rec" : ( 25223, 25224 ),
"AL25b" : ( 25224, 25225 ),
"AL26a1" : ( 25225, 25226 ),
"AL26a2" : ( 25226, 25227 ),
"AL26a3" : ( 25227, 25228 ),
"AL26a4" : ( 25228, 25229 ),
"AL26a5" : ( 25229, 25230 ),
"AL26a6" : ( 25230, 25231 ),
"AL26a7" : ( 25231, 25232 ),
"AL26a8" : ( 25232, 25233 ),
"AL1_ao26" : ( 25233, 25235 ),
"AL2_ao26" : ( 25235, 25237 ),
"AL3_ao26" : ( 25237, 25239 ),
"AL4_ao26" : ( 25239, 25241 ),
"AL5_ao26" : ( 25241, 25243 ),
"AL6_ao26" : ( 25243, 25245 ),
"AL7_ao26" : ( 25245, 25247 ),
"AL8_ao26" : ( 25247, 25249 ),
"AL26b1" : ( 25249, 25250 ),
"AL26b2" : ( 25250, 25251 ),
"AL26b3" : ( 25251, 25252 ),
"AL26b4" : ( 25252, 25253 ),
"AL26b5" : ( 25253, 25254 ),
"AL26b6" : ( 25254, 25255 ),
"AL26b7" : ( 25255, 25256 ),
"AL26b8" : ( 25256, 25257 ),
"AL26AgeRec" : ( 25257, 25259 ),
"AL26Rec" : ( 25259, 25260 ),
"AL26c" : ( 25260, 25261 ),
"AL27" : ( 25261, 25262 ),
"AL27AgeOns" : ( 25262, 25264 ),
"AL27Ons" : ( 25264, 25265 ),
"AL27AgeRec" : ( 25265, 25267 ),
"AL27Rec" : ( 25267, 25268 ),
"AL27b" : ( 25268, 25269 ),
"AL27c" : ( 25269, 25270 ),
"AL28" : ( 25270, 25271 ),
"AL28AgeOns" : ( 25271, 25273 ),
"AL28Ons" : ( 25273, 25274 ),
"AL28AgeRec" : ( 25274, 25276 ),
"AL28Rec" : ( 25276, 25277 ),
"AL28b" : ( 25277, 25279 ),
"AL28b1" : ( 25279, 25280 ),
"AL28c" : ( 25280, 25281 ),
"AL29" : ( 25281, 25282 ),
"AL29AgeOns" : ( 25282, 25284 ),
"AL29Ons" : ( 25284, 25285 ),
"AL29AgeRec" : ( 25285, 25287 ),
"AL29Rec" : ( 25287, 25288 ),
"AL29b" : ( 25288, 25290 ),
"AL29b1" : ( 25290, 25291 ),
"AL29c" : ( 25291, 25292 ),
"AL31_1" : ( 25292, 25293 ),
"AL31_2" : ( 25293, 25294 ),
"AL31_3" : ( 25294, 25295 ),
"AL31_4" : ( 25295, 25296 ),
"AL31_5" : ( 25296, 25297 ),
"AL31_6" : ( 25297, 25298 ),
"AL31_7" : ( 25298, 25299 ),
"AL31_7SPECIFY" : ( 25299, 25379 ),
"AL31AgeOns" : ( 25379, 25381 ),
"AL31Ons" : ( 25381, 25382 ),
"AL31b" : ( 25382, 25383 ),
"AL32" : ( 25383, 25384 ),
"AL32_specify1" : ( 25384, 25464 ),
"AL32_code1" : ( 25464, 25467 ),
"AL32_specify2" : ( 25467, 25547 ),
"AL32_code2" : ( 25547, 25550 ),
"AL32AgeOns" : ( 25550, 25552 ),
"AL32Ons" : ( 25552, 25553 ),
"AL32Another" : ( 25553, 25554 ),
"AL32AgeRec" : ( 25554, 25556 ),
"AL32Rec" : ( 25556, 25557 ),
"AL32c" : ( 25557, 25558 ),
"AL33_1" : ( 25558, 25559 ),
"AL33_2" : ( 25559, 25560 ),
"AL33_3" : ( 25560, 25561 ),
"AL33_4" : ( 25561, 25562 ),
"AL33_5" : ( 25562, 25563 ),
"AL33a" : ( 25563, 25564 ),
"AL33AgeOns" : ( 25564, 25566 ),
"AL33Ons" : ( 25566, 25567 ),
"AL33AgeRec" : ( 25567, 25569 ),
"AL33Rec" : ( 25569, 25570 ),
"AL34" : ( 25570, 25571 ),
"AL34AgeOns" : ( 25571, 25573 ),
"AL34Ons" : ( 25573, 25574 ),
"AL35" : ( 25574, 25575 ),
"AL35AgeOns" : ( 25575, 25577 ),
"AL35Ons" : ( 25577, 25578 ),
"al37aQsx" : ( 25578, 25579 ),
"al37aQsx2" : ( 25579, 25580 ),
"al37aQsx3" : ( 25580, 25581 ),
"al37aQsx4" : ( 25581, 25582 ),
"al37aQsx5" : ( 25582, 25583 ),
"al37aQsx6" : ( 25583, 25584 ),
"al37aQsx7" : ( 25584, 25585 ),
"al37aQsx8" : ( 25585, 25586 ),
"al37aQsx9" : ( 25586, 25587 ),
"al37aQsx10" : ( 25587, 25588 ),
"AL37_1" : ( 25588, 25589 ),
"AL37_2" : ( 25589, 25590 ),
"AL37_3" : ( 25590, 25591 ),
"AL37_4" : ( 25591, 25592 ),
"AL37_5" : ( 25592, 25593 ),
"AL37_6" : ( 25593, 25594 ),
"AL37_7" : ( 25594, 25595 ),
"AL37_8" : ( 25595, 25596 ),
"AL37_9" : ( 25596, 25597 ),
"AL37_10" : ( 25597, 25598 ),
"AL37a1" : ( 25598, 25599 ),
"AL37a2" : ( 25599, 25600 ),
"AL37a3" : ( 25600, 25601 ),
"AL37a4" : ( 25601, 25602 ),
"AL37a5" : ( 25602, 25603 ),
"AL37a6" : ( 25603, 25604 ),
"AL37a7" : ( 25604, 25605 ),
"AL37a8" : ( 25605, 25606 ),
"AL37a9" : ( 25606, 25607 ),
"AL37a10" : ( 25607, 25608 ),
"AL37b" : ( 25608, 25611 ),
"AL37c" : ( 25611, 25612 ),
"AL37aAgeOns" : ( 25612, 25614 ),
"AL37aOns" : ( 25614, 25615 ),
"AL37aAgeRec" : ( 25615, 25617 ),
"AL37aRec" : ( 25617, 25618 ),
"AL37eAgeOns" : ( 25618, 25620 ),
"AL37eOns" : ( 25620, 25621 ),
"AL37eAgeRec" : ( 25621, 25623 ),
"AL37eRec" : ( 25623, 25624 ),
"Al37aCount" : ( 25624, 25626 ),
"al37TOT" : ( 25626, 25628 ),
"AL37aCluster" : ( 25628, 25629 ),
"AL37bCluster" : ( 25629, 25630 ),
"Al37cCluster" : ( 25630, 25631 ),
"AL37f" : ( 25631, 25634 ),
"AL37f1" : ( 25634, 25635 ),
"AL37g" : ( 25635, 25636 ),
"AL37h" : ( 25636, 25637 ),
"AL37hAgeOns" : ( 25637, 25639 ),
"AL37hOns" : ( 25639, 25640 ),
"AL37hAgeRec" : ( 25640, 25642 ),
"AL37hRec" : ( 25642, 25643 ),
"AL37i" : ( 25643, 25644 ),
"AL37j" : ( 25644, 25645 ),
"Al37Another" : ( 25645, 25646 ),
"AL37_Specify" : ( 25646, 25726 ),
"AL37_Code" : ( 25726, 25745 ),
"Al37Another2" : ( 25745, 25746 ),
"AL37_Specify2" : ( 25746, 25826 ),
"AL37_Code2" : ( 25826, 25845 ),
"Al37Another3" : ( 25845, 25846 ),
"AL37_Specify3" : ( 25846, 25926 ),
"AL37_Code3" : ( 25926, 25945 ),
"Al37Another4" : ( 25945, 25946 ),
"AL37_Specify4" : ( 25946, 26026 ),
"AL37_Code4" : ( 26026, 26045 ),
"AL38" : ( 26045, 26046 ),
"AL38aAgeOns" : ( 26046, 26048 ),
"AL38aOns" : ( 26048, 26049 ),
"AL38aAgeRec" : ( 26049, 26051 ),
"AL38aRec" : ( 26051, 26052 ),
"AL38b" : ( 26052, 26055 ),
"AL38b1" : ( 26055, 26056 ),
"AL38c" : ( 26056, 26057 ),
"AL38cAgeOns" : ( 26057, 26059 ),
"AL38cOns" : ( 26059, 26060 ),
"AL38cAgeRec" : ( 26060, 26062 ),
"AL38cRec" : ( 26062, 26063 ),
"AL38d" : ( 26063, 26064 ),
"Al38Another" : ( 26064, 26065 ),
"AL38_Specify" : ( 26065, 26145 ),
"AL38_Code" : ( 26145, 26164 ),
"Al38Another2" : ( 26164, 26165 ),
"AL38_Specify2" : ( 26165, 26245 ),
"AL38_Code2" : ( 26245, 26264 ),
"Al38Another3" : ( 26264, 26265 ),
"AL38_Specify3" : ( 26265, 26345 ),
"AL38_Code3" : ( 26345, 26364 ),
"Al38Another4" : ( 26364, 26365 ),
"AL38_Specify4" : ( 26365, 26445 ),
"AL38_Code4" : ( 26445, 26464 ),
"AL39Another" : ( 26464, 26465 ),
"AL39_Specify" : ( 26465, 26545 ),
"AL39_Code" : ( 26545, 26564 ),
"AL39Another2" : ( 26564, 26565 ),
"AL39_Specify2" : ( 26565, 26645 ),
"AL39_Code2" : ( 26645, 26664 ),
"AL39Another3" : ( 26664, 26665 ),
"AL39_Specify3" : ( 26665, 26745 ),
"AL39_Code3" : ( 26745, 26764 ),
"AL39Another4" : ( 26764, 26765 ),
"AL39_Specify4" : ( 26765, 26845 ),
"AL39_Code4" : ( 26845, 26864 ),
"AL39" : ( 26864, 26865 ),
"AL39aAgeOns" : ( 26865, 26867 ),
"AL39aOns" : ( 26867, 26868 ),
"AL39aAgeRec" : ( 26868, 26870 ),
"AL39aRec" : ( 26870, 26871 ),
"AL39b" : ( 26871, 26874 ),
"AL39b1" : ( 26874, 26875 ),
"AL39c" : ( 26875, 26876 ),
"AL39cAgeOns" : ( 26876, 26878 ),
"AL39cOns" : ( 26878, 26879 ),
"AL39cAgeRec" : ( 26879, 26881 ),
"AL39cRec" : ( 26881, 26882 ),
"AL39d" : ( 26882, 26883 ),
"AL39_Specify1" : ( 26883, 26963 ),
"AL39_code1" : ( 26963, 26982 ),
"AL39_specify5" : ( 26982, 27062 ),
"AL39_code5" : ( 27062, 27081 ),
"ALSxYrCount" : ( 27081, 27083 ),
"ALSxMnthCount" : ( 27083, 27085 ),
"AL40AgeOns" : ( 27085, 27087 ),
"AL40Ons" : ( 27087, 27088 ),
"AL40AgeRec" : ( 27088, 27090 ),
"AL40Rec" : ( 27090, 27091 ),
"Al40Qsx" : ( 27091, 27092 ),
"Al40Qsx2" : ( 27092, 27093 ),
"Al40Qsx3" : ( 27093, 27094 ),
"Al40Qsx4" : ( 27094, 27095 ),
"Al40Qsx5" : ( 27095, 27096 ),
"Al40Qsx6" : ( 27096, 27097 ),
"Al40Qsx7" : ( 27097, 27098 ),
"Al40Qsx8" : ( 27098, 27099 ),
"Al40Qsx9" : ( 27099, 27100 ),
"Al40Qsx10" : ( 27100, 27101 ),
"Al40Qsx11" : ( 27101, 27102 ),
"Al40Qsx12" : ( 27102, 27103 ),
"Al40Qsx13" : ( 27103, 27104 ),
"Al40Qsx14" : ( 27104, 27105 ),
"Al40Qsx15" : ( 27105, 27106 ),
"Al40Qsx16" : ( 27106, 27107 ),
"Al40Qsx17" : ( 27107, 27108 ),
"Al40Qsx18" : ( 27108, 27109 ),
"Al40Qsx19" : ( 27109, 27110 ),
"Al40Qsx20" : ( 27110, 27111 ),
"Al40Qsx21" : ( 27111, 27112 ),
"Al40Qsx22" : ( 27112, 27113 ),
"Al40Qsx23" : ( 27113, 27114 ),
"Al40Qsx24" : ( 27114, 27115 ),
"Al40Qsx25" : ( 27115, 27116 ),
"Al40Qsx26" : ( 27116, 27117 ),
"Al40Qsx27" : ( 27117, 27118 ),
"Al40Qsx28" : ( 27118, 27119 ),
"Al40Qsx29" : ( 27119, 27120 ),
"Al40Qsx30" : ( 27120, 27121 ),
"Al40Qsx31" : ( 27121, 27122 ),
"Al40Qsx32" : ( 27122, 27123 ),
"Al40aQsx" : ( 27123, 27124 ),
"Al40aQsx2" : ( 27124, 27125 ),
"Al40aQsx3" : ( 27125, 27126 ),
"Al40aQsx4" : ( 27126, 27127 ),
"Al40aQsx5" : ( 27127, 27128 ),
"Al40aQsx6" : ( 27128, 27129 ),
"Al40aQsx7" : ( 27129, 27130 ),
"Al40aQsx8" : ( 27130, 27131 ),
"Al40aQsx9" : ( 27131, 27132 ),
"Al40aQsx10" : ( 27132, 27133 ),
"Al40aQsx11" : ( 27133, 27134 ),
"Al40aQsx12" : ( 27134, 27135 ),
"Al40aQsx13" : ( 27135, 27136 ),
"Al40aQsx14" : ( 27136, 27137 ),
"Al40aQsx15" : ( 27137, 27138 ),
"Al40aQsx16" : ( 27138, 27139 ),
"Al40aQsx17" : ( 27139, 27140 ),
"Al40aQsx18" : ( 27140, 27141 ),
"Al40aQsx19" : ( 27141, 27142 ),
"Al40aQsx20" : ( 27142, 27143 ),
"Al40aQsx21" : ( 27143, 27144 ),
"Al40aQsx22" : ( 27144, 27145 ),
"Al40aQsx23" : ( 27145, 27146 ),
"Al40aQsx24" : ( 27146, 27147 ),
"Al40aQsx25" : ( 27147, 27148 ),
"Al40aQsx26" : ( 27148, 27149 ),
"Al40aQsx27" : ( 27149, 27150 ),
"Al40aQsx28" : ( 27150, 27151 ),
"Al40aQsx29" : ( 27151, 27152 ),
"Al40aQsx30" : ( 27152, 27153 ),
"Al40aQsx31" : ( 27153, 27154 ),
"Al40aQsx32" : ( 27154, 27155 ),
"AL9dYrCl" : ( 27155, 27156 ),
"AL9iYrCl" : ( 27156, 27157 ),
"AL10YrCl" : ( 27157, 27158 ),
"AL10cYrCl" : ( 27158, 27159 ),
"AL10dYrCl" : ( 27159, 27160 ),
"AL12cYrCl" : ( 27160, 27161 ),
"AL13bYrCl" : ( 27161, 27162 ),
"AL14bYrCl" : ( 27162, 27163 ),
"AL15aYrCl" : ( 27163, 27164 ),
"AL16bYrCl" : ( 27164, 27165 ),
"AL21YrCl" : ( 27165, 27166 ),
"AL21dYrCl" : ( 27166, 27167 ),
"AL22bYrCl" : ( 27167, 27168 ),
"AL23bYrCl" : ( 27168, 27169 ),
"AL24bYrCl" : ( 27169, 27170 ),
"AL25YrCl" : ( 27170, 27171 ),
"AL29bYrCl" : ( 27171, 27172 ),
"AL26a1YrCl" : ( 27172, 27173 ),
"AL26a2YrCl" : ( 27173, 27174 ),
"AL26a3YrCl" : ( 27174, 27175 ),
"AL27cYrCl" : ( 27175, 27176 ),
"AL28bYrCl" : ( 27176, 27177 ),
"AL31bYrCl" : ( 27177, 27178 ),
"AL32YrCl" : ( 27178, 27179 ),
"AL33aYrCl" : ( 27179, 27180 ),
"AL37dYrCl" : ( 27180, 27181 ),
"AL38YrCl" : ( 27181, 27182 ),
"AL39YrCl" : ( 27182, 27183 ),
"AL37iYrCl" : ( 27183, 27184 ),
"AL38cYrCl" : ( 27184, 27185 ),
"AL39cYrCl" : ( 27185, 27186 ),
"Al19YrCl" : ( 27186, 27187 ),
"AL9dMnthCl" : ( 27187, 27188 ),
"AL9iMnthCl" : ( 27188, 27189 ),
"AL10MnthCl" : ( 27189, 27190 ),
"AL10cMnthCl" : ( 27190, 27191 ),
"AL10dMnthCl" : ( 27191, 27192 ),
"AL12cMnthCl" : ( 27192, 27193 ),
"AL13bMnthCl" : ( 27193, 27194 ),
"AL14bMnthCl" : ( 27194, 27195 ),
"AL15aMnthCl" : ( 27195, 27196 ),
"AL16bMnthCl" : ( 27196, 27197 ),
"AL21MnthCl" : ( 27197, 27198 ),
"AL21dMnthCl" : ( 27198, 27199 ),
"AL22bMnthCl" : ( 27199, 27200 ),
"AL23bMnthCl" : ( 27200, 27201 ),
"AL24bMnthCl" : ( 27201, 27202 ),
"AL25MnthCl" : ( 27202, 27203 ),
"AL29bMnthCl" : ( 27203, 27204 ),
"AL26a1MnthCl" : ( 27204, 27205 ),
"AL26a2MnthCl" : ( 27205, 27206 ),
"AL26a3MnthCl" : ( 27206, 27207 ),
"AL27cMnthCl" : ( 27207, 27208 ),
"AL28bMnthCl" : ( 27208, 27209 ),
"AL31bMnthCl" : ( 27209, 27210 ),
"AL32MnthCl" : ( 27210, 27211 ),
"AL33aMnthCl" : ( 27211, 27212 ),
"AL37dMnthCl" : ( 27212, 27213 ),
"AL38MnthCl" : ( 27213, 27214 ),
"AL39MnthCl" : ( 27214, 27215 ),
"AL37iMnthCl" : ( 27215, 27216 ),
"AL38cMnthCl" : ( 27216, 27217 ),
"AL39cMnthCl" : ( 27217, 27218 ),
"AL19MnthCl" : ( 27218, 27219 ),
"AL43" : ( 27219, 27222 ),
"AL43A" : ( 27222, 27224 ),
"AL43FromMnth_" : ( 27224, 27226 ),
"AL43FromYr_" : ( 27226, 27230 ),
"AL43ToMnth_" : ( 27230, 27232 ),
"AL43ToYR_" : ( 27232, 27236 ),
"AL43FromMnth_2" : ( 27236, 27238 ),
"AL43FromYr_2" : ( 27238, 27242 ),
"AL43ToMnth_2" : ( 27242, 27244 ),
"AL43ToYR_2" : ( 27244, 27248 ),
"AL43FromMnth_3" : ( 27248, 27250 ),
"AL43FromYr_3" : ( 27250, 27254 ),
"AL43ToMnth_3" : ( 27254, 27256 ),
"AL43ToYR_3" : ( 27256, 27260 ),
"AL43FromMnth_4" : ( 27260, 27262 ),
"AL43FromYr_4" : ( 27262, 27266 ),
"AL43ToMnth_4" : ( 27266, 27268 ),
"AL43ToYR_4" : ( 27268, 27272 ),
"AL43FromMnth_5" : ( 27272, 27274 ),
"AL43FromYr_5" : ( 27274, 27278 ),
"AL43ToMnth_5" : ( 27278, 27280 ),
"AL43ToYR_5" : ( 27280, 27284 ),
"AL44" : ( 27284, 27285 ),
"AL44a1" : ( 27285, 27286 ),
"AL44a2" : ( 27286, 27287 ),
"AL44a3" : ( 27287, 27288 ),
"AL44a4" : ( 27288, 27289 ),
"AL44a5" : ( 27289, 27290 ),
"AL44a6" : ( 27290, 27291 ),
"AL44a6_specify" : ( 27291, 27371 ),
"AL44AgeOns" : ( 27371, 27373 ),
"AL44Ons" : ( 27373, 27374 ),
"AL44AgeRec" : ( 27374, 27376 ),
"AL44Rec" : ( 27376, 27377 ),
"AL44c" : ( 27377, 27378 ),
"AL45" : ( 27378, 27379 ),
"AL45a1" : ( 27379, 27380 ),
"AL45a2" : ( 27380, 27381 ),
"AL45a3" : ( 27381, 27382 ),
"AL45a4" : ( 27382, 27383 ),
"AL45a5" : ( 27383, 27384 ),
"AL45a6" : ( 27384, 27385 ),
"AL45a6_Specify" : ( 27385, 27610 ),
"AL45bAgeOns" : ( 27610, 27612 ),
"AL45bOns" : ( 27612, 27613 ),
"AL45bAgeRec" : ( 27613, 27615 ),
"AL45bRec" : ( 27615, 27616 ),
"AL45c" : ( 27616, 27617 ),
"AL45d" : ( 27617, 27618 ),
"AL45dAgeOns" : ( 27618, 27620 ),
"AL45dOns" : ( 27620, 27621 ),
"AL45dAgeRec" : ( 27621, 27623 ),
"AL45dRec" : ( 27623, 27624 ),
"IND_ID2" : ( 27624, 27633 ),
"MJ1" : ( 27633, 27634 ),
"MJ1a" : ( 27634, 27638 ),
"MJ1a1" : ( 27638, 27639 ),
"MJ1a2" : ( 27639, 27640 ),
"MJ1b" : ( 27640, 27641 ),
"MJ2AgeOns" : ( 27641, 27643 ),
"MJ2Ons" : ( 27643, 27644 ),
"MJ2AgeRec" : ( 27644, 27646 ),
"MJ2Rec" : ( 27646, 27647 ),
"MJ2A" : ( 27647, 27648 ),
"MJ2C" : ( 27648, 27651 ),
"MJ2c1" : ( 27651, 27652 ),
"MJ2d" : ( 27652, 27653 ),
"MJ3_num" : ( 27653, 27656 ),
"MJ3_unit" : ( 27656, 27657 ),
"MJ3AgeOns" : ( 27657, 27659 ),
"MJ3Ons" : ( 27659, 27660 ),
"MJ3AgeRec" : ( 27660, 27662 ),
"MJ3Rec" : ( 27662, 27663 ),
"MJ3b" : ( 27663, 27665 ),
"MJ3c_NUM" : ( 27665, 27668 ),
"MJ3c_UNIT" : ( 27668, 27669 ),
"MJ3d" : ( 27669, 27672 ),
"MJ3e" : ( 27672, 27676 ),
"MJ4" : ( 27676, 27677 ),
"MJ4AgeOns" : ( 27677, 27679 ),
"MJ4Ons" : ( 27679, 27680 ),
"MJ4AgeRec" : ( 27680, 27682 ),
"MJ4Rec" : ( 27682, 27683 ),
"MJ5" : ( 27683, 27684 ),
"MJ6_1" : ( 27684, 27685 ),
"MJ6_2" : ( 27685, 27686 ),
"MJ6_3" : ( 27686, 27687 ),
"MJ6_4" : ( 27687, 27688 ),
"MJ6_5" : ( 27688, 27689 ),
"MJ6a1" : ( 27689, 27690 ),
"MJ6a2" : ( 27690, 27691 ),
"MJ6a3" : ( 27691, 27692 ),
"MJ6a4" : ( 27692, 27693 ),
"MJ6a5" : ( 27693, 27694 ),
"MJ6b" : ( 27694, 27695 ),
"MJ7" : ( 27695, 27696 ),
"MJ7A" : ( 27696, 27697 ),
"MJ7B" : ( 27697, 27698 ),
"MJ8" : ( 27698, 27699 ),
"MJ9" : ( 27699, 27700 ),
"MJ10_1" : ( 27700, 27701 ),
"MJ10_2" : ( 27701, 27702 ),
"MJ10_3" : ( 27702, 27703 ),
"MJ10_4" : ( 27703, 27704 ),
"MJ10_5" : ( 27704, 27705 ),
"MJ10_6" : ( 27705, 27706 ),
"MJ10_7" : ( 27706, 27707 ),
"MJ10dQsx" : ( 27707, 27708 ),
"MJ10dQsx2" : ( 27708, 27709 ),
"MJ10dQsx3" : ( 27709, 27710 ),
"MJ10dQsx4" : ( 27710, 27711 ),
"MJ10dQsx5" : ( 27711, 27712 ),
"MJ10dQsx6" : ( 27712, 27713 ),
"MJ10dQsx7" : ( 27713, 27714 ),
"MJ10d_1" : ( 27714, 27715 ),
"MJ10d_2" : ( 27715, 27716 ),
"MJ10d_3" : ( 27716, 27717 ),
"MJ10d_4" : ( 27717, 27718 ),
"MJ10d_5" : ( 27718, 27719 ),
"MJ10d_6" : ( 27719, 27720 ),
"MJ10d_7" : ( 27720, 27721 ),
"MJ10a" : ( 27721, 27722 ),
"MJ10b" : ( 27722, 27723 ),
"MJ10c" : ( 27723, 27724 ),
"MJ10e" : ( 27724, 27727 ),
"MJ10f" : ( 27727, 27730 ),
"MJ10g" : ( 27730, 27731 ),
"MJ11" : ( 27731, 27732 ),
"MJ11a" : ( 27732, 27733 ),
"MJ11a1" : ( 27733, 27734 ),
"MJ11b" : ( 27734, 27735 ),
"MJ11c" : ( 27735, 27736 ),
"MJ11c1" : ( 27736, 27737 ),
"MJ12" : ( 27737, 27738 ),
"MJ12a" : ( 27738, 27739 ),
"MJ12b" : ( 27739, 27740 ),
"MJ13" : ( 27740, 27741 ),
"MJ13a" : ( 27741, 27742 ),
"MJ13a1" : ( 27742, 27743 ),
"MJ14" : ( 27743, 27744 ),
"MJ14a" : ( 27744, 27745 ),
"MJ16" : ( 27745, 27746 ),
"MJ16AgeOns" : ( 27746, 27748 ),
"MJ16Ons" : ( 27748, 27749 ),
"MJ16AgeRec" : ( 27749, 27751 ),
"MJ16Rec" : ( 27751, 27752 ),
"MJ17" : ( 27752, 27753 ),
"MJ17a" : ( 27753, 27754 ),
"MJ18" : ( 27754, 27755 ),
"MJ18Another" : ( 27755, 27756 ),
"MJ18_Specify" : ( 27756, 27836 ),
"MJ18_Code" : ( 27836, 27855 ),
"MJ18Another2" : ( 27855, 27856 ),
"MJ18_Specify2" : ( 27856, 27936 ),
"MJ18_Code2" : ( 27936, 27955 ),
"MJ18Another3" : ( 27955, 27956 ),
"MJ18_Specify3" : ( 27956, 28036 ),
"MJ18_Code3" : ( 28036, 28055 ),
"MJ18Another4" : ( 28055, 28056 ),
"MJ18_Specify4" : ( 28056, 28136 ),
"MJ18_Code4" : ( 28136, 28155 ),
"MJ19AgeOns" : ( 28155, 28157 ),
"MJ19Ons" : ( 28157, 28158 ),
"MJ19AgeRec" : ( 28158, 28160 ),
"MJ19Rec" : ( 28160, 28161 ),
"MJ19Qsx" : ( 28161, 28162 ),
"MJ19Qsx2" : ( 28162, 28163 ),
"MJ19Qsx3" : ( 28163, 28164 ),
"MJ19Qsx4" : ( 28164, 28165 ),
"MJ19Qsx5" : ( 28165, 28166 ),
"MJ19Qsx6" : ( 28166, 28167 ),
"MJ19Qsx7" : ( 28167, 28168 ),
"MJ19Qsx8" : ( 28168, 28169 ),
"MJ19Qsx9" : ( 28169, 28170 ),
"MJ19Qsx10" : ( 28170, 28171 ),
"MJ19Qsx11" : ( 28171, 28172 ),
"MJ19Qsx12" : ( 28172, 28173 ),
"MJ19Qsx13" : ( 28173, 28174 ),
"MJ19Qsx14" : ( 28174, 28175 ),
"MJ19Qsx15" : ( 28175, 28176 ),
"MJ19Qsx16" : ( 28176, 28177 ),
"MJ19Qsx17" : ( 28177, 28178 ),
"MJ19Qsx18" : ( 28178, 28179 ),
"MJ19dQsx" : ( 28179, 28180 ),
"MJ19dQsx2" : ( 28180, 28181 ),
"MJ19dQsx3" : ( 28181, 28182 ),
"MJ19dQsx4" : ( 28182, 28183 ),
"MJ19dQsx5" : ( 28183, 28184 ),
"MJ19dQsx6" : ( 28184, 28185 ),
"MJ19dQsx7" : ( 28185, 28186 ),
"MJ19dQsx8" : ( 28186, 28187 ),
"MJ19dQsx9" : ( 28187, 28188 ),
"MJ19dQsx10" : ( 28188, 28189 ),
"MJ19dQsx11" : ( 28189, 28190 ),
"MJ19dQsx12" : ( 28190, 28191 ),
"MJ19dQsx13" : ( 28191, 28192 ),
"MJ19dQsx14" : ( 28192, 28193 ),
"MJ19dQsx15" : ( 28193, 28194 ),
"MJ19dQsx16" : ( 28194, 28195 ),
"MJ19dQsx17" : ( 28195, 28196 ),
"MJ19dQsx18" : ( 28196, 28197 ),
"MJ19cAgeOns" : ( 28197, 28199 ),
"MJ19cOns" : ( 28199, 28200 ),
"MJ19cAgeRec" : ( 28200, 28202 ),
"MJ19cRec" : ( 28202, 28203 ),
"MJ5YrCl" : ( 28203, 28204 ),
"MJ6a1YrCl" : ( 28204, 28205 ),
"MJ6a3YrCl" : ( 28205, 28206 ),
"MJ6a4YrCl" : ( 28206, 28207 ),
"MJ6a5YrCl" : ( 28207, 28208 ),
"MJ7YrCl" : ( 28208, 28209 ),
"MJ7aYrCl" : ( 28209, 28210 ),
"MJ8YrCl" : ( 28210, 28211 ),
"MJ9YrCl" : ( 28211, 28212 ),
"MJ10bYrCl" : ( 28212, 28213 ),
"MJ10cYrCl" : ( 28213, 28214 ),
"MJ11aYrCl" : ( 28214, 28215 ),
"MJ11cYrCl" : ( 28215, 28216 ),
"MJ14YrCl" : ( 28216, 28217 ),
"MJ12bYrCl" : ( 28217, 28218 ),
"MJ13aYrCl" : ( 28218, 28219 ),
"MJ16YrCl" : ( 28219, 28220 ),
"MJ17aYrCl" : ( 28220, 28221 ),
"MJ5MnthCl" : ( 28221, 28222 ),
"MJ6a1MnthCl" : ( 28222, 28223 ),
"MJ6a3MnthCl" : ( 28223, 28224 ),
"MJ6a4MnthCl" : ( 28224, 28225 ),
"MJ6a5MnthCl" : ( 28225, 28226 ),
"MJ7MnthCl" : ( 28226, 28227 ),
"MJ7aMnthCl" : ( 28227, 28228 ),
"MJ8MnthCl" : ( 28228, 28229 ),
"MJ9MnthCl" : ( 28229, 28230 ),
"MJ10bMnthCl" : ( 28230, 28231 ),
"MJ10cMnthCl" : ( 28231, 28232 ),
"MJ11aMnthCl" : ( 28232, 28233 ),
"MJ11cMnthCl" : ( 28233, 28234 ),
"MJ14MnthCl" : ( 28234, 28235 ),
"MJ12bMnthCl" : ( 28235, 28236 ),
"MJ13aMnthCl" : ( 28236, 28237 ),
"MJ16MnthCl" : ( 28237, 28238 ),
"MJ17aMnthCl" : ( 28238, 28239 ),
"MJ22" : ( 28239, 28240 ),
"MJ22A" : ( 28240, 28242 ),
"MJ22FromMnth_" : ( 28242, 28244 ),
"MJ22FromYr_" : ( 28244, 28248 ),
"MJ22ToMnth_" : ( 28248, 28250 ),
"MJ22ToYR_" : ( 28250, 28254 ),
"MJ22FromMnth_2" : ( 28254, 28256 ),
"MJ22FromYr_2" : ( 28256, 28260 ),
"MJ22ToMnth_2" : ( 28260, 28262 ),
"MJ22ToYR_2" : ( 28262, 28266 ),
"MJ22FromMnth_3" : ( 28266, 28268 ),
"MJ22FromYr_3" : ( 28268, 28272 ),
"MJ22ToMnth_3" : ( 28272, 28274 ),
"MJ22ToYR_3" : ( 28274, 28278 ),
"MJ22FromMnth_4" : ( 28278, 28280 ),
"MJ22FromYr_4" : ( 28280, 28284 ),
"MJ22ToMnth_4" : ( 28284, 28286 ),
"MJ22ToYR_4" : ( 28286, 28290 ),
"MJ22FromMnth_5" : ( 28290, 28292 ),
"MJ22FromYr_5" : ( 28292, 28296 ),
"MJ22ToMnth_5" : ( 28296, 28298 ),
"MJ22ToYR_5" : ( 28298, 28302 ),
"MJ23" : ( 28302, 28303 ),
"MJ23a1" : ( 28303, 28304 ),
"MJ23a2" : ( 28304, 28305 ),
"MJ23a3" : ( 28305, 28306 ),
"MJ23a4" : ( 28306, 28307 ),
"MJ23a5" : ( 28307, 28308 ),
"MJ23a6" : ( 28308, 28309 ),
"MJ23a6_specify" : ( 28309, 28389 ),
"MJ23AgeOns" : ( 28389, 28391 ),
"MJ23Ons" : ( 28391, 28392 ),
"MJ23AgeRec" : ( 28392, 28394 ),
"MJ23Rec" : ( 28394, 28395 ),
"MJ23c" : ( 28395, 28396 ),
"MJ24" : ( 28396, 28397 ),
"MJ24a1" : ( 28397, 28398 ),
"MJ24a2" : ( 28398, 28399 ),
"MJ24a3" : ( 28399, 28400 ),
"MJ24a4" : ( 28400, 28401 ),
"MJ24a5" : ( 28401, 28402 ),
"MJ24a6" : ( 28402, 28403 ),
"MJ24a6_Specify" : ( 28403, 28628 ),
"MJ24bAgeOns" : ( 28628, 28630 ),
"MJ24bOns" : ( 28630, 28631 ),
"MJ24bAgeRec" : ( 28631, 28633 ),
"MJ24bRec" : ( 28633, 28634 ),
"MJ24c" : ( 28634, 28635 ),
"MJ24d" : ( 28635, 28636 ),
"MJ24dAgeOns" : ( 28636, 28638 ),
"MJ24dOns" : ( 28638, 28639 ),
"MJ24dAgeRec" : ( 28639, 28641 ),
"MJ24dRec" : ( 28641, 28642 ),
"NumDrugs" : ( 28642, 28644 ),
"IND_ID3" : ( 28644, 28654 ),
"DR1_" : ( 28654, 28655 ),
"DR1_2" : ( 28655, 28656 ),
"DR1_3" : ( 28656, 28657 ),
"DR1_4" : ( 28657, 28658 ),
"DR1_5" : ( 28658, 28659 ),
"DR1_6" : ( 28659, 28660 ),
"DR1_7" : ( 28660, 28661 ),
"DR1_8" : ( 28661, 28662 ),
"DR1_9" : ( 28662, 28663 ),
"DRUG5" : ( 28663, 28918 ),
"DR1a_" : ( 28918, 28922 ),
"DR1a1_" : ( 28922, 28923 ),
"DR1a2_" : ( 28923, 28924 ),
"DR1bAgeOns_" : ( 28924, 28926 ),
"DR1bOns_" : ( 28926, 28927 ),
"DR1bAgeRec_" : ( 28927, 28929 ),
"DR1bRec_" : ( 28929, 28930 ),
"DR1c_" : ( 28930, 28931 ),
"DR1a_2" : ( 28931, 28935 ),
"DR1a1_2" : ( 28935, 28936 ),
"DR1a2_2" : ( 28936, 28937 ),
"DR1bAgeOns_2" : ( 28937, 28939 ),
"DR1bOns_2" : ( 28939, 28940 ),
"DR1bAgeRec_2" : ( 28940, 28942 ),
"DR1bRec_2" : ( 28942, 28943 ),
"DR1c_2" : ( 28943, 28944 ),
"DR1a_3" : ( 28944, 28948 ),
"DR1a1_3" : ( 28948, 28949 ),
"DR1a2_3" : ( 28949, 28950 ),
"DR1bAgeOns_3" : ( 28950, 28952 ),
"DR1bOns_3" : ( 28952, 28953 ),
"DR1bAgeRec_3" : ( 28953, 28955 ),
"DR1bRec_3" : ( 28955, 28956 ),
"DR1c_3" : ( 28956, 28957 ),
"DR1a_4" : ( 28957, 28961 ),
"DR1a1_4" : ( 28961, 28962 ),
"DR1a2_4" : ( 28962, 28963 ),
"DR1bAgeOns_4" : ( 28963, 28965 ),
"DR1bOns_4" : ( 28965, 28966 ),
"DR1bAgeRec_4" : ( 28966, 28968 ),
"DR1bRec_4" : ( 28968, 28969 ),
"DR1c_4" : ( 28969, 28970 ),
"DR1a_5" : ( 28970, 28974 ),
"DR1a1_5" : ( 28974, 28975 ),
"DR1a2_5" : ( 28975, 28976 ),
"DR1bAgeOns_5" : ( 28976, 28978 ),
"DR1bOns_5" : ( 28978, 28979 ),
"DR1bAgeRec_5" : ( 28979, 28981 ),
"DR1bRec_5" : ( 28981, 28982 ),
"DR1c_5" : ( 28982, 28983 ),
"DR1a_6" : ( 28983, 28987 ),
"DR1a1_6" : ( 28987, 28988 ),
"DR1a2_6" : ( 28988, 28989 ),
"DR1bAgeOns_6" : ( 28989, 28991 ),
"DR1bOns_6" : ( 28991, 28992 ),
"DR1bAgeRec_6" : ( 28992, 28994 ),
"DR1bRec_6" : ( 28994, 28995 ),
"DR1c_6" : ( 28995, 28996 ),
"DR1a_7" : ( 28996, 29000 ),
"DR1a1_7" : ( 29000, 29001 ),
"DR1a2_7" : ( 29001, 29002 ),
"DR1bAgeOns_7" : ( 29002, 29004 ),
"DR1bOns_7" : ( 29004, 29005 ),
"DR1bAgeRec_7" : ( 29005, 29007 ),
"DR1bRec_7" : ( 29007, 29008 ),
"DR1c_7" : ( 29008, 29009 ),
"DR1a_8" : ( 29009, 29013 ),
"DR1a1_8" : ( 29013, 29014 ),
"DR1a2_8" : ( 29014, 29015 ),
"DR1bAgeOns_8" : ( 29015, 29017 ),
"DR1bOns_8" : ( 29017, 29018 ),
"DR1bAgeRec_8" : ( 29018, 29020 ),
"DR1bRec_8" : ( 29020, 29021 ),
"DR1c_8" : ( 29021, 29022 ),
"DR1a_9" : ( 29022, 29026 ),
"DR1a1_9" : ( 29026, 29027 ),
"DR1a2_9" : ( 29027, 29028 ),
"DR1bAgeOns_9" : ( 29028, 29030 ),
"DR1bOns_9" : ( 29030, 29031 ),
"DR1bAgeRec_9" : ( 29031, 29033 ),
"DR1bRec_9" : ( 29033, 29034 ),
"DR1c_9" : ( 29034, 29035 ),
"DR1d" : ( 29035, 29036 ),
"DR1e" : ( 29036, 29037 ),
"DR1f_" : ( 29037, 29038 ),
"DR1f1_" : ( 29038, 29042 ),
"DR1fAgeOns_" : ( 29042, 29044 ),
"DR1fOns_" : ( 29044, 29045 ),
"DR1fAgeRec_" : ( 29045, 29047 ),
"DR1fRec_" : ( 29047, 29048 ),
"DR1f_2" : ( 29048, 29049 ),
"DR1f1_2" : ( 29049, 29053 ),
"DR1fAgeOns_2" : ( 29053, 29055 ),
"DR1fOns_2" : ( 29055, 29056 ),
"DR1fAgeRec_2" : ( 29056, 29058 ),
"DR1fRec_2" : ( 29058, 29059 ),
"DR1f_3" : ( 29059, 29060 ),
"DR1f1_3" : ( 29060, 29064 ),
"DR1fAgeOns_3" : ( 29064, 29066 ),
"DR1fOns_3" : ( 29066, 29067 ),
"DR1fAgeRec_3" : ( 29067, 29069 ),
"DR1fRec_3" : ( 29069, 29070 ),
"DR1f_4" : ( 29070, 29071 ),
"DR1f1_4" : ( 29071, 29075 ),
"DR1fAgeOns_4" : ( 29075, 29077 ),
"DR1fOns_4" : ( 29077, 29078 ),
"DR1fAgeRec_4" : ( 29078, 29080 ),
"DR1fRec_4" : ( 29080, 29081 ),
"DR1f_5" : ( 29081, 29082 ),
"DR1f1_5" : ( 29082, 29086 ),
"DR1fAgeOns_5" : ( 29086, 29088 ),
"DR1fOns_5" : ( 29088, 29089 ),
"DR1fAgeRec_5" : ( 29089, 29091 ),
"DR1fRec_5" : ( 29091, 29092 ),
"DR1f_6" : ( 29092, 29093 ),
"DR1f1_6" : ( 29093, 29097 ),
"DR1fAgeOns_6" : ( 29097, 29099 ),
"DR1fOns_6" : ( 29099, 29100 ),
"DR1fAgeRec_6" : ( 29100, 29102 ),
"DR1fRec_6" : ( 29102, 29103 ),
"DR1f_7" : ( 29103, 29104 ),
"DR1f1_7" : ( 29104, 29108 ),
"DR1fAgeOns_7" : ( 29108, 29110 ),
"DR1fOns_7" : ( 29110, 29111 ),
"DR1fAgeRec_7" : ( 29111, 29113 ),
"DR1fRec_7" : ( 29113, 29114 ),
"DR1f_8" : ( 29114, 29115 ),
"DR1f1_8" : ( 29115, 29119 ),
"DR1fAgeOns_8" : ( 29119, 29121 ),
"DR1fOns_8" : ( 29121, 29122 ),
"DR1fAgeRec_8" : ( 29122, 29124 ),
"DR1fRec_8" : ( 29124, 29125 ),
"DR1f_9" : ( 29125, 29126 ),
"DR1f1_9" : ( 29126, 29130 ),
"DR1fAgeOns_9" : ( 29130, 29132 ),
"DR1fOns_9" : ( 29132, 29133 ),
"DR1fAgeRec_9" : ( 29133, 29135 ),
"DR1fRec_9" : ( 29135, 29136 ),
"DR1iSPECIFYa" : ( 29136, 29391 ),
"DR1iCODEa" : ( 29391, 29394 ),
"DR1iSPECIFYb" : ( 29394, 29649 ),
"DR1iCODEb" : ( 29649, 29652 ),
"dr1i_ANOTHER" : ( 29652, 29653 ),
"DR1iSPECIFYa2" : ( 29653, 29908 ),
"DR1iCODEa2" : ( 29908, 29911 ),
"DR1iSPECIFYb2" : ( 29911, 30166 ),
"DR1iCODEb2" : ( 30166, 30169 ),
"dr1i_ANOTHER2" : ( 30169, 30170 ),
"DR1iSPECIFYa3" : ( 30170, 30425 ),
"DR1iCODEa3" : ( 30425, 30428 ),
"DR1iSPECIFYb3" : ( 30428, 30683 ),
"DR1iCODEb3" : ( 30683, 30686 ),
"dr1i_ANOTHER3" : ( 30686, 30687 ),
"DR1iSPECIFYa4" : ( 30687, 30942 ),
"DR1iCODEa4" : ( 30942, 30945 ),
"DR1iSPECIFYb4" : ( 30945, 31200 ),
"DR1iCODEb4" : ( 31200, 31203 ),
"dr1i_ANOTHER4" : ( 31203, 31204 ),
"DR1iSPECIFYa5" : ( 31204, 31459 ),
"DR1iCODEa5" : ( 31459, 31462 ),
"DR1iSPECIFYb5" : ( 31462, 31717 ),
"DR1iCODEb5" : ( 31717, 31720 ),
"dr1i_ANOTHER5" : ( 31720, 31721 ),
"DR1f_aux" : ( 31721, 31722 ),
"DR1g" : ( 31722, 31723 ),
"DR1g1" : ( 31723, 31727 ),
"DR1gAgeOns" : ( 31727, 31729 ),
"DR1gOns" : ( 31729, 31730 ),
"DR1gAgeRec" : ( 31730, 31732 ),
"DR1gRec" : ( 31732, 31733 ),
"DR1h_specify" : ( 31733, 31813 ),
"DR1h_CODE" : ( 31813, 31816 ),
"DR1i" : ( 31816, 31817 ),
"DR2_specify" : ( 31817, 31897 ),
"DR2_Code" : ( 31897, 31900 ),
"DR2_NUM_" : ( 31900, 31902 ),
"DR2_UNIT_" : ( 31902, 31903 ),
"DR2A_" : ( 31903, 31904 ),
"DR2B_" : ( 31904, 31906 ),
"DR2B1_" : ( 31906, 31909 ),
"DR2B2_" : ( 31909, 31911 ),
"DR2AgeOns_" : ( 31911, 31913 ),
"DR2_NUM_2" : ( 31913, 31915 ),
"DR2_UNIT_2" : ( 31915, 31916 ),
"DR2A_2" : ( 31916, 31917 ),
"DR2B_2" : ( 31917, 31919 ),
"DR2B1_2" : ( 31919, 31922 ),
"DR2B2_2" : ( 31922, 31924 ),
"DR2AgeOns_2" : ( 31924, 31926 ),
"DR2_NUM_3" : ( 31926, 31928 ),
"DR2_UNIT_3" : ( 31928, 31929 ),
"DR2A_3" : ( 31929, 31930 ),
"DR2B_3" : ( 31930, 31932 ),
"DR2B1_3" : ( 31932, 31935 ),
"DR2B2_3" : ( 31935, 31937 ),
"DR2AgeOns_3" : ( 31937, 31939 ),
"DR2_NUM_4" : ( 31939, 31941 ),
"DR2_UNIT_4" : ( 31941, 31942 ),
"DR2A_4" : ( 31942, 31943 ),
"DR2B_4" : ( 31943, 31945 ),
"DR2B1_4" : ( 31945, 31948 ),
"DR2B2_4" : ( 31948, 31950 ),
"DR2AgeOns_4" : ( 31950, 31952 ),
"DR2_NUM_5" : ( 31952, 31954 ),
"DR2_UNIT_5" : ( 31954, 31955 ),
"DR2A_5" : ( 31955, 31956 ),
"DR2B_5" : ( 31956, 31958 ),
"DR2B1_5" : ( 31958, 31961 ),
"DR2B2_5" : ( 31961, 31963 ),
"DR2AgeOns_5" : ( 31963, 31965 ),
"DR3_" : ( 31965, 31966 ),
"DR3a_" : ( 31966, 31967 ),
"DR3_2" : ( 31967, 31968 ),
"DR3a_2" : ( 31968, 31969 ),
"DR3_3" : ( 31969, 31970 ),
"DR3a_3" : ( 31970, 31971 ),
"DR3_4" : ( 31971, 31972 ),
"DR3a_4" : ( 31972, 31973 ),
"DR3_5" : ( 31973, 31974 ),
"DR3a_5" : ( 31974, 31975 ),
"DR5_" : ( 31975, 31976 ),
"DR5a_" : ( 31976, 31977 ),
"DR5AgeOns_" : ( 31977, 31979 ),
"DR5Ons_" : ( 31979, 31980 ),
"DR5AgeRec_" : ( 31980, 31982 ),
"DR5Rec_" : ( 31982, 31983 ),
"DR5_2" : ( 31983, 31984 ),
"DR5a_2" : ( 31984, 31985 ),
"DR5AgeOns_2" : ( 31985, 31987 ),
"DR5Ons_2" : ( 31987, 31988 ),
"DR5AgeRec_2" : ( 31988, 31990 ),
"DR5Rec_2" : ( 31990, 31991 ),
"DR5_3" : ( 31991, 31992 ),
"DR5a_3" : ( 31992, 31993 ),
"DR5AgeOns_3" : ( 31993, 31995 ),
"DR5Ons_3" : ( 31995, 31996 ),
"DR5AgeRec_3" : ( 31996, 31998 ),
"DR5Rec_3" : ( 31998, 31999 ),
"DR5_4" : ( 31999, 32000 ),
"DR5a_4" : ( 32000, 32001 ),
"DR5AgeOns_4" : ( 32001, 32003 ),
"DR5Ons_4" : ( 32003, 32004 ),
"DR5AgeRec_4" : ( 32004, 32006 ),
"DR5Rec_4" : ( 32006, 32007 ),
"DR5_5" : ( 32007, 32008 ),
"DR5a_5" : ( 32008, 32009 ),
"DR5AgeOns_5" : ( 32009, 32011 ),
"DR5Ons_5" : ( 32011, 32012 ),
"DR5AgeRec_5" : ( 32012, 32014 ),
"DR5Rec_5" : ( 32014, 32015 ),
"DR6_" : ( 32015, 32016 ),
"DR6_2" : ( 32016, 32017 ),
"DR6_3" : ( 32017, 32018 ),
"DR6_4" : ( 32018, 32019 ),
"DR6_5" : ( 32019, 32020 ),
"DR7_" : ( 32020, 32021 ),
"DR7a_" : ( 32021, 32022 ),
"DR7b_" : ( 32022, 32023 ),
"DR7_2" : ( 32023, 32024 ),
"DR7a_2" : ( 32024, 32025 ),
"DR7b_2" : ( 32025, 32026 ),
"DR7_3" : ( 32026, 32027 ),
"DR7a_3" : ( 32027, 32028 ),
"DR7b_3" : ( 32028, 32029 ),
"DR7_4" : ( 32029, 32030 ),
"DR7a_4" : ( 32030, 32031 ),
"DR7b_4" : ( 32031, 32032 ),
"DR7_5" : ( 32032, 32033 ),
"DR7a_5" : ( 32033, 32034 ),
"DR7b_5" : ( 32034, 32035 ),
"DR8_" : ( 32035, 32036 ),
"DR8_2" : ( 32036, 32037 ),
"DR8_3" : ( 32037, 32038 ),
"DR8_4" : ( 32038, 32039 ),
"DR8_5" : ( 32039, 32040 ),
"DR9_" : ( 32040, 32041 ),
"DR9a_" : ( 32041, 32042 ),
"DR9_2" : ( 32042, 32043 ),
"DR9a_2" : ( 32043, 32044 ),
"DR9_3" : ( 32044, 32045 ),
"DR9a_3" : ( 32045, 32046 ),
"DR9_4" : ( 32046, 32047 ),
"DR9a_4" : ( 32047, 32048 ),
"DR9_5" : ( 32048, 32049 ),
"DR9a_5" : ( 32049, 32050 ),
"DR10_" : ( 32050, 32051 ),
"DR10_2" : ( 32051, 32052 ),
"DR10_3" : ( 32052, 32053 ),
"DR10_4" : ( 32053, 32054 ),
"DR10_5" : ( 32054, 32055 ),
"DR11a1_" : ( 32055, 32056 ),
"DR11a2_" : ( 32056, 32057 ),
"DR11a3_" : ( 32057, 32058 ),
"DR11a4_" : ( 32058, 32059 ),
"DR11a5_" : ( 32059, 32060 ),
"DR11a6_" : ( 32060, 32061 ),
"DR11a7_" : ( 32061, 32062 ),
"DR11a8_" : ( 32062, 32063 ),
"DR11a9_" : ( 32063, 32064 ),
"DR11a10_" : ( 32064, 32065 ),
"DR11a11_" : ( 32065, 32066 ),
"DR11a12_" : ( 32066, 32067 ),
"DR11a13_" : ( 32067, 32068 ),
"DR11a14_" : ( 32068, 32069 ),
"DR11a15_" : ( 32069, 32070 ),
"DR11a16_" : ( 32070, 32071 ),
"DR11a17_" : ( 32071, 32072 ),
"DR11a18_" : ( 32072, 32073 ),
"DR11a19_" : ( 32073, 32074 ),
"DR11a20_" : ( 32074, 32075 ),
"DR11a21_" : ( 32075, 32076 ),
"DR11a22_" : ( 32076, 32077 ),
"DR11a23_" : ( 32077, 32078 ),
"DR11a24_" : ( 32078, 32079 ),
"DR11a25_" : ( 32079, 32080 ),
"DR11a26_" : ( 32080, 32081 ),
"DR11a27_" : ( 32081, 32082 ),
"DR11a28_" : ( 32082, 32083 ),
"DR11a29_" : ( 32083, 32084 ),
"DR11b_" : ( 32084, 32085 ),
"DR11b1_" : ( 32085, 32086 ),
"DR11bAgeOns_" : ( 32086, 32088 ),
"DR11bOns_" : ( 32088, 32089 ),
"DR11bAgeRec_" : ( 32089, 32091 ),
"DR11bRec_" : ( 32091, 32092 ),
"DR11c_" : ( 32092, 32093 ),
"DR11d_" : ( 32093, 32094 ),
"DR11e_" : ( 32094, 32095 ),
"DR11eAgeOns_" : ( 32095, 32097 ),
"DR11eOns_" : ( 32097, 32098 ),
"DR11eAgeRec_" : ( 32098, 32100 ),
"DR11eRec_" : ( 32100, 32101 ),
"DR11e2_" : ( 32101, 32102 ),
"DR11a1_2" : ( 32102, 32103 ),
"DR11a2_2" : ( 32103, 32104 ),
"DR11a3_2" : ( 32104, 32105 ),
"DR11a4_2" : ( 32105, 32106 ),
"DR11a5_2" : ( 32106, 32107 ),
"DR11a6_2" : ( 32107, 32108 ),
"DR11a7_2" : ( 32108, 32109 ),
"DR11a8_2" : ( 32109, 32110 ),
"DR11a9_2" : ( 32110, 32111 ),
"DR11a10_2" : ( 32111, 32112 ),
"DR11a11_2" : ( 32112, 32113 ),
"DR11a12_2" : ( 32113, 32114 ),
"DR11a13_2" : ( 32114, 32115 ),
"DR11a14_2" : ( 32115, 32116 ),
"DR11a15_2" : ( 32116, 32117 ),
"DR11a16_2" : ( 32117, 32118 ),
"DR11a17_2" : ( 32118, 32119 ),
"DR11a18_2" : ( 32119, 32120 ),
"DR11a19_2" : ( 32120, 32121 ),
"DR11a20_2" : ( 32121, 32122 ),
"DR11a21_2" : ( 32122, 32123 ),
"DR11a22_2" : ( 32123, 32124 ),
"DR11a23_2" : ( 32124, 32125 ),
"DR11a24_2" : ( 32125, 32126 ),
"DR11a25_2" : ( 32126, 32127 ),
"DR11a26_2" : ( 32127, 32128 ),
"DR11a27_2" : ( 32128, 32129 ),
"DR11a28_2" : ( 32129, 32130 ),
"DR11a29_2" : ( 32130, 32131 ),
"DR11b_2" : ( 32131, 32132 ),
"DR11b1_2" : ( 32132, 32133 ),
"DR11bAgeOns_2" : ( 32133, 32135 ),
"DR11bOns_2" : ( 32135, 32136 ),
"DR11bAgeRec_2" : ( 32136, 32138 ),
"DR11bRec_2" : ( 32138, 32139 ),
"DR11c_2" : ( 32139, 32140 ),
"DR11d_2" : ( 32140, 32141 ),
"DR11e_2" : ( 32141, 32142 ),
"DR11eAgeOns_2" : ( 32142, 32144 ),
"DR11eOns_2" : ( 32144, 32145 ),
"DR11eAgeRec_2" : ( 32145, 32147 ),
"DR11eRec_2" : ( 32147, 32148 ),
"DR11e2_2" : ( 32148, 32149 ),
"DR11a1_3" : ( 32149, 32150 ),
"DR11a2_3" : ( 32150, 32151 ),
"DR11a3_3" : ( 32151, 32152 ),
"DR11a4_3" : ( 32152, 32153 ),
"DR11a5_3" : ( 32153, 32154 ),
"DR11a6_3" : ( 32154, 32155 ),
"DR11a7_3" : ( 32155, 32156 ),
"DR11a8_3" : ( 32156, 32157 ),
"DR11a9_3" : ( 32157, 32158 ),
"DR11a10_3" : ( 32158, 32159 ),
"DR11a11_3" : ( 32159, 32160 ),
"DR11a12_3" : ( 32160, 32161 ),
"DR11a13_3" : ( 32161, 32162 ),
"DR11a14_3" : ( 32162, 32163 ),
"DR11a15_3" : ( 32163, 32164 ),
"DR11a16_3" : ( 32164, 32165 ),
"DR11a17_3" : ( 32165, 32166 ),
"DR11a18_3" : ( 32166, 32167 ),
"DR11a19_3" : ( 32167, 32168 ),
"DR11a20_3" : ( 32168, 32169 ),
"DR11a21_3" : ( 32169, 32170 ),
"DR11a22_3" : ( 32170, 32171 ),
"DR11a23_3" : ( 32171, 32172 ),
"DR11a24_3" : ( 32172, 32173 ),
"DR11a25_3" : ( 32173, 32174 ),
"DR11a26_3" : ( 32174, 32175 ),
"DR11a27_3" : ( 32175, 32176 ),
"DR11a28_3" : ( 32176, 32177 ),
"DR11a29_3" : ( 32177, 32178 ),
"DR11b_3" : ( 32178, 32179 ),
"DR11b1_3" : ( 32179, 32180 ),
"DR11bAgeOns_3" : ( 32180, 32182 ),
"DR11bOns_3" : ( 32182, 32183 ),
"DR11bAgeRec_3" : ( 32183, 32185 ),
"DR11bRec_3" : ( 32185, 32186 ),
"DR11c_3" : ( 32186, 32187 ),
"DR11d_3" : ( 32187, 32188 ),
"DR11e_3" : ( 32188, 32189 ),
"DR11eAgeOns_3" : ( 32189, 32191 ),
"DR11eOns_3" : ( 32191, 32192 ),
"DR11eAgeRec_3" : ( 32192, 32194 ),
"DR11eRec_3" : ( 32194, 32195 ),
"DR11e2_3" : ( 32195, 32196 ),
"DR11a1_4" : ( 32196, 32197 ),
"DR11a2_4" : ( 32197, 32198 ),
"DR11a3_4" : ( 32198, 32199 ),
"DR11a4_4" : ( 32199, 32200 ),
"DR11a5_4" : ( 32200, 32201 ),
"DR11a6_4" : ( 32201, 32202 ),
"DR11a7_4" : ( 32202, 32203 ),
"DR11a8_4" : ( 32203, 32204 ),
"DR11a9_4" : ( 32204, 32205 ),
"DR11a10_4" : ( 32205, 32206 ),
"DR11a11_4" : ( 32206, 32207 ),
"DR11a12_4" : ( 32207, 32208 ),
"DR11a13_4" : ( 32208, 32209 ),
"DR11a14_4" : ( 32209, 32210 ),
"DR11a15_4" : ( 32210, 32211 ),
"DR11a16_4" : ( 32211, 32212 ),
"DR11a17_4" : ( 32212, 32213 ),
"DR11a18_4" : ( 32213, 32214 ),
"DR11a19_4" : ( 32214, 32215 ),
"DR11a20_4" : ( 32215, 32216 ),
"DR11a21_4" : ( 32216, 32217 ),
"DR11a22_4" : ( 32217, 32218 ),
"DR11a23_4" : ( 32218, 32219 ),
"DR11a24_4" : ( 32219, 32220 ),
"DR11a25_4" : ( 32220, 32221 ),
"DR11a26_4" : ( 32221, 32222 ),
"DR11a27_4" : ( 32222, 32223 ),
"DR11a28_4" : ( 32223, 32224 ),
"DR11a29_4" : ( 32224, 32225 ),
"DR11b_4" : ( 32225, 32226 ),
"DR11b1_4" : ( 32226, 32227 ),
"DR11bAgeOns_4" : ( 32227, 32229 ),
"DR11bOns_4" : ( 32229, 32230 ),
"DR11bAgeRec_4" : ( 32230, 32232 ),
"DR11bRec_4" : ( 32232, 32233 ),
"DR11c_4" : ( 32233, 32234 ),
"DR11d_4" : ( 32234, 32235 ),
"DR11e_4" : ( 32235, 32236 ),
"DR11eAgeOns_4" : ( 32236, 32238 ),
"DR11eOns_4" : ( 32238, 32239 ),
"DR11eAgeRec_4" : ( 32239, 32241 ),
"DR11eRec_4" : ( 32241, 32242 ),
"DR11e2_4" : ( 32242, 32243 ),
"DR11a1_5" : ( 32243, 32244 ),
"DR11a2_5" : ( 32244, 32245 ),
"DR11a3_5" : ( 32245, 32246 ),
"DR11a4_5" : ( 32246, 32247 ),
"DR11a5_5" : ( 32247, 32248 ),
"DR11a6_5" : ( 32248, 32249 ),
"DR11a7_5" : ( 32249, 32250 ),
"DR11a8_5" : ( 32250, 32251 ),
"DR11a9_5" : ( 32251, 32252 ),
"DR11a10_5" : ( 32252, 32253 ),
"DR11a11_5" : ( 32253, 32254 ),
"DR11a12_5" : ( 32254, 32255 ),
"DR11a13_5" : ( 32255, 32256 ),
"DR11a14_5" : ( 32256, 32257 ),
"DR11a15_5" : ( 32257, 32258 ),
"DR11a16_5" : ( 32258, 32259 ),
"DR11a17_5" : ( 32259, 32260 ),
"DR11a18_5" : ( 32260, 32261 ),
"DR11a19_5" : ( 32261, 32262 ),
"DR11a20_5" : ( 32262, 32263 ),
"DR11a21_5" : ( 32263, 32264 ),
"DR11a22_5" : ( 32264, 32265 ),
"DR11a23_5" : ( 32265, 32266 ),
"DR11a24_5" : ( 32266, 32267 ),
"DR11a25_5" : ( 32267, 32268 ),
"DR11a26_5" : ( 32268, 32269 ),
"DR11a27_5" : ( 32269, 32270 ),
"DR11a28_5" : ( 32270, 32271 ),
"DR11a29_5" : ( 32271, 32272 ),
"DR11b_5" : ( 32272, 32273 ),
"DR11b1_5" : ( 32273, 32274 ),
"DR11bAgeOns_5" : ( 32274, 32276 ),
"DR11bOns_5" : ( 32276, 32277 ),
"DR11bAgeRec_5" : ( 32277, 32279 ),
"DR11bRec_5" : ( 32279, 32280 ),
"DR11c_5" : ( 32280, 32281 ),
"DR11d_5" : ( 32281, 32282 ),
"DR11e_5" : ( 32282, 32283 ),
"DR11eAgeOns_5" : ( 32283, 32285 ),
"DR11eOns_5" : ( 32285, 32286 ),
"DR11eAgeRec_5" : ( 32286, 32288 ),
"DR11eRec_5" : ( 32288, 32289 ),
"DR11e2_5" : ( 32289, 32290 ),
"DR12a_" : ( 32290, 32291 ),
"DR12a1_" : ( 32291, 32292 ),
"DR12a2_" : ( 32292, 32293 ),
"DR12b_" : ( 32293, 32294 ),
"DR12b1_" : ( 32294, 32295 ),
"DR12c_" : ( 32295, 32296 ),
"DR12cSpecify_" : ( 32296, 32521 ),
"DR12c1_" : ( 32521, 32522 ),
"DR12a_2" : ( 32522, 32523 ),
"DR12a1_2" : ( 32523, 32524 ),
"DR12a2_2" : ( 32524, 32525 ),
"DR12b_2" : ( 32525, 32526 ),
"DR12b1_2" : ( 32526, 32527 ),
"DR12c_2" : ( 32527, 32528 ),
"DR12cSpecify_2" : ( 32528, 32753 ),
"DR12c1_2" : ( 32753, 32754 ),
"DR12a_3" : ( 32754, 32755 ),
"DR12a1_3" : ( 32755, 32756 ),
"DR12a2_3" : ( 32756, 32757 ),
"DR12b_3" : ( 32757, 32758 ),
"DR12b1_3" : ( 32758, 32759 ),
"DR12c_3" : ( 32759, 32760 ),
"DR12cSpecify_3" : ( 32760, 32985 ),
"DR12c1_3" : ( 32985, 32986 ),
"DR12a_4" : ( 32986, 32987 ),
"DR12a1_4" : ( 32987, 32988 ),
"DR12a2_4" : ( 32988, 32989 ),
"DR12b_4" : ( 32989, 32990 ),
"DR12b1_4" : ( 32990, 32991 ),
"DR12c_4" : ( 32991, 32992 ),
"DR12cSpecify_4" : ( 32992, 33217 ),
"DR12c1_4" : ( 33217, 33218 ),
"DR12a_5" : ( 33218, 33219 ),
"DR12a1_5" : ( 33219, 33220 ),
"DR12a2_5" : ( 33220, 33221 ),
"DR12b_5" : ( 33221, 33222 ),
"DR12b1_5" : ( 33222, 33223 ),
"DR12c_5" : ( 33223, 33224 ),
"DR12cSpecify_5" : ( 33224, 33449 ),
"DR12c1_5" : ( 33449, 33450 ),
"DR13a_" : ( 33450, 33451 ),
"DR13b_" : ( 33451, 33452 ),
"DR13c_" : ( 33452, 33453 ),
"DR13d_" : ( 33453, 33454 ),
"DR13a_2" : ( 33454, 33455 ),
"DR13b_2" : ( 33455, 33456 ),
"DR13c_2" : ( 33456, 33457 ),
"DR13d_2" : ( 33457, 33458 ),
"DR13a_3" : ( 33458, 33459 ),
"DR13b_3" : ( 33459, 33460 ),
"DR13c_3" : ( 33460, 33461 ),
"DR13d_3" : ( 33461, 33462 ),
"DR13a_4" : ( 33462, 33463 ),
"DR13b_4" : ( 33463, 33464 ),
"DR13c_4" : ( 33464, 33465 ),
"DR13d_4" : ( 33465, 33466 ),
"DR13a_5" : ( 33466, 33467 ),
"DR13b_5" : ( 33467, 33468 ),
"DR13c_5" : ( 33468, 33469 ),
"DR13d_5" : ( 33469, 33470 ),
"DR14_" : ( 33470, 33471 ),
"DR14a_" : ( 33471, 33472 ),
"DR14b_" : ( 33472, 33473 ),
"DR14_2" : ( 33473, 33474 ),
"DR14a_2" : ( 33474, 33475 ),
"DR14b_2" : ( 33475, 33476 ),
"DR14_3" : ( 33476, 33477 ),
"DR14a_3" : ( 33477, 33478 ),
"DR14b_3" : ( 33478, 33479 ),
"DR14_4" : ( 33479, 33480 ),
"DR14a_4" : ( 33480, 33481 ),
"DR14b_4" : ( 33481, 33482 ),
"DR14_5" : ( 33482, 33483 ),
"DR14a_5" : ( 33483, 33484 ),
"DR14b_5" : ( 33484, 33485 ),
"DR15_" : ( 33485, 33486 ),
"DR15a_" : ( 33486, 33487 ),
"DR15b_" : ( 33487, 33488 ),
"DR15_2" : ( 33488, 33489 ),
"DR15a_2" : ( 33489, 33490 ),
"DR15b_2" : ( 33490, 33491 ),
"DR15_3" : ( 33491, 33492 ),
"DR15a_3" : ( 33492, 33493 ),
"DR15b_3" : ( 33493, 33494 ),
"DR15_4" : ( 33494, 33495 ),
"DR15a_4" : ( 33495, 33496 ),
"DR15b_4" : ( 33496, 33497 ),
"DR15_5" : ( 33497, 33498 ),
"DR15a_5" : ( 33498, 33499 ),
"DR15b_5" : ( 33499, 33500 ),
"DR16_" : ( 33500, 33501 ),
"DR16a_" : ( 33501, 33502 ),
"DR16_2" : ( 33502, 33503 ),
"DR16a_2" : ( 33503, 33504 ),
"DR16_3" : ( 33504, 33505 ),
"DR16a_3" : ( 33505, 33506 ),
"DR16_4" : ( 33506, 33507 ),
"DR16a_4" : ( 33507, 33508 ),
"DR16_5" : ( 33508, 33509 ),
"DR16a_5" : ( 33509, 33510 ),
"DR17_" : ( 33510, 33511 ),
"DR17a_" : ( 33511, 33512 ),
"DR17_2" : ( 33512, 33513 ),
"DR17a_2" : ( 33513, 33514 ),
"DR17_3" : ( 33514, 33515 ),
"DR17a_3" : ( 33515, 33516 ),
"DR17_4" : ( 33516, 33517 ),
"DR17a_4" : ( 33517, 33518 ),
"DR17_5" : ( 33518, 33519 ),
"DR17a_5" : ( 33519, 33520 ),
"DR18_1_" : ( 33520, 33521 ),
"DR18_2_" : ( 33521, 33522 ),
"DR18_3_" : ( 33522, 33523 ),
"DR18_4_" : ( 33523, 33524 ),
"DR18_5_" : ( 33524, 33525 ),
"DR18a_" : ( 33525, 33526 ),
"DR18_1_2" : ( 33526, 33527 ),
"DR18_2_2" : ( 33527, 33528 ),
"DR18_3_2" : ( 33528, 33529 ),
"DR18_4_2" : ( 33529, 33530 ),
"DR18_5_2" : ( 33530, 33531 ),
"DR18a_2" : ( 33531, 33532 ),
"DR18_1_3" : ( 33532, 33533 ),
"DR18_2_3" : ( 33533, 33534 ),
"DR18_3_3" : ( 33534, 33535 ),
"DR18_4_3" : ( 33535, 33536 ),
"DR18_5_3" : ( 33536, 33537 ),
"DR18a_3" : ( 33537, 33538 ),
"DR18_1_4" : ( 33538, 33539 ),
"DR18_2_4" : ( 33539, 33540 ),
"DR18_3_4" : ( 33540, 33541 ),
"DR18_4_4" : ( 33541, 33542 ),
"DR18_5_4" : ( 33542, 33543 ),
"DR18a_4" : ( 33543, 33544 ),
"DR18_1_5" : ( 33544, 33545 ),
"DR18_2_5" : ( 33545, 33546 ),
"DR18_3_5" : ( 33546, 33547 ),
"DR18_4_5" : ( 33547, 33548 ),
"DR18_5_5" : ( 33548, 33549 ),
"DR18a_5" : ( 33549, 33550 ),
"DR19Qsx" : ( 33550, 33551 ),
"DR19Qsx2" : ( 33551, 33552 ),
"DR19Qsx3" : ( 33552, 33553 ),
"DR19Qsx4" : ( 33553, 33554 ),
"DR19Qsx5" : ( 33554, 33555 ),
"DR19Qsx6" : ( 33555, 33556 ),
"DR19Qsx7" : ( 33556, 33557 ),
"DR19Qsx8" : ( 33557, 33558 ),
"DR19Qsx9" : ( 33558, 33559 ),
"DR19Qsx10" : ( 33559, 33560 ),
"DR19Qsx11" : ( 33560, 33561 ),
"DR19Qsx12" : ( 33561, 33562 ),
"DR19Qsx13" : ( 33562, 33563 ),
"DR19Qsx14" : ( 33563, 33564 ),
"DR19Qsx15" : ( 33564, 33565 ),
"DR19Qsx16" : ( 33565, 33566 ),
"DR19Qsx17" : ( 33566, 33567 ),
"DR19Qsx18" : ( 33567, 33568 ),
"DR19Qsx19" : ( 33568, 33569 ),
"DR19aQsx" : ( 33569, 33570 ),
"DR19aQsx2" : ( 33570, 33571 ),
"DR19aQsx3" : ( 33571, 33572 ),
"DR19aQsx4" : ( 33572, 33573 ),
"DR19aQsx5" : ( 33573, 33574 ),
"DR19aQsx6" : ( 33574, 33575 ),
"DR19aQsx7" : ( 33575, 33576 ),
"DR19aQsx8" : ( 33576, 33577 ),
"DR19aQsx9" : ( 33577, 33578 ),
"DR19aQsx10" : ( 33578, 33579 ),
"DR19aQsx11" : ( 33579, 33580 ),
"DR19aQsx12" : ( 33580, 33581 ),
"DR19aQsx13" : ( 33581, 33582 ),
"DR19aQsx14" : ( 33582, 33583 ),
"DR19aQsx15" : ( 33583, 33584 ),
"DR19aQsx16" : ( 33584, 33585 ),
"DR19aQsx17" : ( 33585, 33586 ),
"DR19aQsx18" : ( 33586, 33587 ),
"DR19aQsx19" : ( 33587, 33588 ),
"DR19SxAgeOns_" : ( 33588, 33590 ),
"DR19SxOns_" : ( 33590, 33591 ),
"DR19SxAgeRec_" : ( 33591, 33593 ),
"DR19SxRec_" : ( 33593, 33594 ),
"DR5YrCl_" : ( 33594, 33595 ),
"DR6YrCl_" : ( 33595, 33596 ),
"DR7YrCl_" : ( 33596, 33597 ),
"DR7aYrCl_" : ( 33597, 33598 ),
"DR7bYrCl_" : ( 33598, 33599 ),
"DR8YrCl_" : ( 33599, 33600 ),
"DR9aYrCl_" : ( 33600, 33601 ),
"DR10YrCl_" : ( 33601, 33602 ),
"DR11bYrCl_" : ( 33602, 33603 ),
"DR11e2YrCl_" : ( 33603, 33604 ),
"DR12a2YrCl_" : ( 33604, 33605 ),
"DR12b1YrCl_" : ( 33605, 33606 ),
"DR12c1YrCl_" : ( 33606, 33607 ),
"DR13dYrCl_" : ( 33607, 33608 ),
"DR14bYrCl_" : ( 33608, 33609 ),
"DR18aYrCl_" : ( 33609, 33610 ),
"DR15aYrCl_" : ( 33610, 33611 ),
"DR16YrCl_" : ( 33611, 33612 ),
"DR17YrCl_" : ( 33612, 33613 ),
"DR5MnthCl_" : ( 33613, 33614 ),
"DR6MnthCl_" : ( 33614, 33615 ),
"DR7MnthCl_" : ( 33615, 33616 ),
"DR7aMnthCl_" : ( 33616, 33617 ),
"DR7bMnthCl_" : ( 33617, 33618 ),
"DR8MnthCl_" : ( 33618, 33619 ),
"DR9aMnthCl_" : ( 33619, 33620 ),
"DR10MnthCl_" : ( 33620, 33621 ),
"DR11bMnthCl_" : ( 33621, 33622 ),
"DR11e2MnthCl_" : ( 33622, 33623 ),
"DR12a2MnthCl_" : ( 33623, 33624 ),
"DR12b1MnthCl_" : ( 33624, 33625 ),
"DR12c1MnthCl_" : ( 33625, 33626 ),
"DR13dMnthCl_" : ( 33626, 33627 ),
"DR14bMnthCl_" : ( 33627, 33628 ),
"DR18aMnthCl_" : ( 33628, 33629 ),
"DR15aMnthCl_" : ( 33629, 33630 ),
"DR16MnthCl_" : ( 33630, 33631 ),
"DR17MnthCl_" : ( 33631, 33632 ),
"DR19AgeOns_" : ( 33632, 33634 ),
"DR19Ons_" : ( 33634, 33635 ),
"DR19AgeRec_" : ( 33635, 33637 ),
"DR19Rec_" : ( 33637, 33638 ),
"DRSxCount" : ( 33638, 33640 ),
"DRYrClCount" : ( 33640, 33642 ),
"DRMnthClCount" : ( 33642, 33644 ),
"DR19Qsx20" : ( 33644, 33645 ),
"DR19Qsx21" : ( 33645, 33646 ),
"DR19Qsx22" : ( 33646, 33647 ),
"DR19Qsx23" : ( 33647, 33648 ),
"DR19Qsx24" : ( 33648, 33649 ),
"DR19Qsx25" : ( 33649, 33650 ),
"DR19Qsx26" : ( 33650, 33651 ),
"DR19Qsx27" : ( 33651, 33652 ),
"DR19Qsx28" : ( 33652, 33653 ),
"DR19Qsx29" : ( 33653, 33654 ),
"DR19Qsx30" : ( 33654, 33655 ),
"DR19Qsx31" : ( 33655, 33656 ),
"DR19Qsx32" : ( 33656, 33657 ),
"DR19Qsx33" : ( 33657, 33658 ),
"DR19Qsx34" : ( 33658, 33659 ),
"DR19Qsx35" : ( 33659, 33660 ),
"DR19Qsx36" : ( 33660, 33661 ),
"DR19Qsx37" : ( 33661, 33662 ),
"DR19Qsx38" : ( 33662, 33663 ),
"DR19aQsx20" : ( 33663, 33664 ),
"DR19aQsx21" : ( 33664, 33665 ),
"DR19aQsx22" : ( 33665, 33666 ),
"DR19aQsx23" : ( 33666, 33667 ),
"DR19aQsx24" : ( 33667, 33668 ),
"DR19aQsx25" : ( 33668, 33669 ),
"DR19aQsx26" : ( 33669, 33670 ),
"DR19aQsx27" : ( 33670, 33671 ),
"DR19aQsx28" : ( 33671, 33672 ),
"DR19aQsx29" : ( 33672, 33673 ),
"DR19aQsx30" : ( 33673, 33674 ),
"DR19aQsx31" : ( 33674, 33675 ),
"DR19aQsx32" : ( 33675, 33676 ),
"DR19aQsx33" : ( 33676, 33677 ),
"DR19aQsx34" : ( 33677, 33678 ),
"DR19aQsx35" : ( 33678, 33679 ),
"DR19aQsx36" : ( 33679, 33680 ),
"DR19aQsx37" : ( 33680, 33681 ),
"DR19aQsx38" : ( 33681, 33682 ),
"DR19SxAgeOns_2" : ( 33682, 33684 ),
"DR19SxOns_2" : ( 33684, 33685 ),
"DR19SxAgeRec_2" : ( 33685, 33687 ),
"DR19SxRec_2" : ( 33687, 33688 ),
"DR5YrCl_2" : ( 33688, 33689 ),
"DR6YrCl_2" : ( 33689, 33690 ),
"DR7YrCl_2" : ( 33690, 33691 ),
"DR7aYrCl_2" : ( 33691, 33692 ),
"DR7bYrCl_2" : ( 33692, 33693 ),
"DR8YrCl_2" : ( 33693, 33694 ),
"DR9aYrCl_2" : ( 33694, 33695 ),
"DR10YrCl_2" : ( 33695, 33696 ),
"DR11bYrCl_2" : ( 33696, 33697 ),
"DR11e2YrCl_2" : ( 33697, 33698 ),
"DR12a2YrCl_2" : ( 33698, 33699 ),
"DR12b1YrCl_2" : ( 33699, 33700 ),
"DR12c1YrCl_2" : ( 33700, 33701 ),
"DR13dYrCl_2" : ( 33701, 33702 ),
"DR14bYrCl_2" : ( 33702, 33703 ),
"DR18aYrCl_2" : ( 33703, 33704 ),
"DR15aYrCl_2" : ( 33704, 33705 ),
"DR16YrCl_2" : ( 33705, 33706 ),
"DR17YrCl_2" : ( 33706, 33707 ),
"DR5MnthCl_2" : ( 33707, 33708 ),
"DR6MnthCl_2" : ( 33708, 33709 ),
"DR7MnthCl_2" : ( 33709, 33710 ),
"DR7aMnthCl_2" : ( 33710, 33711 ),
"DR7bMnthCl_2" : ( 33711, 33712 ),
"DR8MnthCl_2" : ( 33712, 33713 ),
"DR9aMnthCl_2" : ( 33713, 33714 ),
"DR10MnthCl_2" : ( 33714, 33715 ),
"DR11bMnthCl_2" : ( 33715, 33716 ),
"DR11e2MnthCl_2" : ( 33716, 33717 ),
"DR12a2MnthCl_2" : ( 33717, 33718 ),
"DR12b1MnthCl_2" : ( 33718, 33719 ),
"DR12c1MnthCl_2" : ( 33719, 33720 ),
"DR13dMnthCl_2" : ( 33720, 33721 ),
"DR14bMnthCl_2" : ( 33721, 33722 ),
"DR18aMnthCl_2" : ( 33722, 33723 ),
"DR15aMnthCl_2" : ( 33723, 33724 ),
"DR16MnthCl_2" : ( 33724, 33725 ),
"DR17MnthCl_2" : ( 33725, 33726 ),
"DR19AgeOns_2" : ( 33726, 33728 ),
"DR19Ons_2" : ( 33728, 33729 ),
"DR19AgeRec_2" : ( 33729, 33731 ),
"DR19Rec_2" : ( 33731, 33732 ),
"DRSxCount2" : ( 33732, 33734 ),
"DRYrClCount2" : ( 33734, 33736 ),
"DRMnthClCount2" : ( 33736, 33738 ),
"DR19Qsx39" : ( 33738, 33739 ),
"DR19Qsx40" : ( 33739, 33740 ),
"DR19Qsx41" : ( 33740, 33741 ),
"DR19Qsx42" : ( 33741, 33742 ),
"DR19Qsx43" : ( 33742, 33743 ),
"DR19Qsx44" : ( 33743, 33744 ),
"DR19Qsx45" : ( 33744, 33745 ),
"DR19Qsx46" : ( 33745, 33746 ),
"DR19Qsx47" : ( 33746, 33747 ),
"DR19Qsx48" : ( 33747, 33748 ),
"DR19Qsx49" : ( 33748, 33749 ),
"DR19Qsx50" : ( 33749, 33750 ),
"DR19Qsx51" : ( 33750, 33751 ),
"DR19Qsx52" : ( 33751, 33752 ),
"DR19Qsx53" : ( 33752, 33753 ),
"DR19Qsx54" : ( 33753, 33754 ),
"DR19Qsx55" : ( 33754, 33755 ),
"DR19Qsx56" : ( 33755, 33756 ),
"DR19Qsx57" : ( 33756, 33757 ),
"DR19aQsx39" : ( 33757, 33758 ),
"DR19aQsx40" : ( 33758, 33759 ),
"DR19aQsx41" : ( 33759, 33760 ),
"DR19aQsx42" : ( 33760, 33761 ),
"DR19aQsx43" : ( 33761, 33762 ),
"DR19aQsx44" : ( 33762, 33763 ),
"DR19aQsx45" : ( 33763, 33764 ),
"DR19aQsx46" : ( 33764, 33765 ),
"DR19aQsx47" : ( 33765, 33766 ),
"DR19aQsx48" : ( 33766, 33767 ),
"DR19aQsx49" : ( 33767, 33768 ),
"DR19aQsx50" : ( 33768, 33769 ),
"DR19aQsx51" : ( 33769, 33770 ),
"DR19aQsx52" : ( 33770, 33771 ),
"DR19aQsx53" : ( 33771, 33772 ),
"DR19aQsx54" : ( 33772, 33773 ),
"DR19aQsx55" : ( 33773, 33774 ),
"DR19aQsx56" : ( 33774, 33775 ),
"DR19aQsx57" : ( 33775, 33776 ),
"DR19SxAgeOns_3" : ( 33776, 33778 ),
"DR19SxOns_3" : ( 33778, 33779 ),
"DR19SxAgeRec_3" : ( 33779, 33781 ),
"DR19SxRec_3" : ( 33781, 33782 ),
"DR5YrCl_3" : ( 33782, 33783 ),
"DR6YrCl_3" : ( 33783, 33784 ),
"DR7YrCl_3" : ( 33784, 33785 ),
"DR7aYrCl_3" : ( 33785, 33786 ),
"DR7bYrCl_3" : ( 33786, 33787 ),
"DR8YrCl_3" : ( 33787, 33788 ),
"DR9aYrCl_3" : ( 33788, 33789 ),
"DR10YrCl_3" : ( 33789, 33790 ),
"DR11bYrCl_3" : ( 33790, 33791 ),
"DR11e2YrCl_3" : ( 33791, 33792 ),
"DR12a2YrCl_3" : ( 33792, 33793 ),
"DR12b1YrCl_3" : ( 33793, 33794 ),
"DR12c1YrCl_3" : ( 33794, 33795 ),
"DR13dYrCl_3" : ( 33795, 33796 ),
"DR14bYrCl_3" : ( 33796, 33797 ),
"DR18aYrCl_3" : ( 33797, 33798 ),
"DR15aYrCl_3" : ( 33798, 33799 ),
"DR16YrCl_3" : ( 33799, 33800 ),
"DR17YrCl_3" : ( 33800, 33801 ),
"DR5MnthCl_3" : ( 33801, 33802 ),
"DR6MnthCl_3" : ( 33802, 33803 ),
"DR7MnthCl_3" : ( 33803, 33804 ),
"DR7aMnthCl_3" : ( 33804, 33805 ),
"DR7bMnthCl_3" : ( 33805, 33806 ),
"DR8MnthCl_3" : ( 33806, 33807 ),
"DR9aMnthCl_3" : ( 33807, 33808 ),
"DR10MnthCl_3" : ( 33808, 33809 ),
"DR11bMnthCl_3" : ( 33809, 33810 ),
"DR11e2MnthCl_3" : ( 33810, 33811 ),
"DR12a2MnthCl_3" : ( 33811, 33812 ),
"DR12b1MnthCl_3" : ( 33812, 33813 ),
"DR12c1MnthCl_3" : ( 33813, 33814 ),
"DR13dMnthCl_3" : ( 33814, 33815 ),
"DR14bMnthCl_3" : ( 33815, 33816 ),
"DR18aMnthCl_3" : ( 33816, 33817 ),
"DR15aMnthCl_3" : ( 33817, 33818 ),
"DR16MnthCl_3" : ( 33818, 33819 ),
"DR17MnthCl_3" : ( 33819, 33820 ),
"DR19AgeOns_3" : ( 33820, 33822 ),
"DR19Ons_3" : ( 33822, 33823 ),
"DR19AgeRec_3" : ( 33823, 33825 ),
"DR19Rec_3" : ( 33825, 33826 ),
"DRSxCount3" : ( 33826, 33828 ),
"DRYrClCount3" : ( 33828, 33830 ),
"DRMnthClCount3" : ( 33830, 33832 ),
"DR19Qsx58" : ( 33832, 33833 ),
"DR19Qsx59" : ( 33833, 33834 ),
"DR19Qsx60" : ( 33834, 33835 ),
"DR19Qsx61" : ( 33835, 33836 ),
"DR19Qsx62" : ( 33836, 33837 ),
"DR19Qsx63" : ( 33837, 33838 ),
"DR19Qsx64" : ( 33838, 33839 ),
"DR19Qsx65" : ( 33839, 33840 ),
"DR19Qsx66" : ( 33840, 33841 ),
"DR19Qsx67" : ( 33841, 33842 ),
"DR19Qsx68" : ( 33842, 33843 ),
"DR19Qsx69" : ( 33843, 33844 ),
"DR19Qsx70" : ( 33844, 33845 ),
"DR19Qsx71" : ( 33845, 33846 ),
"DR19Qsx72" : ( 33846, 33847 ),
"DR19Qsx73" : ( 33847, 33848 ),
"DR19Qsx74" : ( 33848, 33849 ),
"DR19Qsx75" : ( 33849, 33850 ),
"DR19Qsx76" : ( 33850, 33851 ),
"DR19aQsx58" : ( 33851, 33852 ),
"DR19aQsx59" : ( 33852, 33853 ),
"DR19aQsx60" : ( 33853, 33854 ),
"DR19aQsx61" : ( 33854, 33855 ),
"DR19aQsx62" : ( 33855, 33856 ),
"DR19aQsx63" : ( 33856, 33857 ),
"DR19aQsx64" : ( 33857, 33858 ),
"DR19aQsx65" : ( 33858, 33859 ),
"DR19aQsx66" : ( 33859, 33860 ),
"DR19aQsx67" : ( 33860, 33861 ),
"DR19aQsx68" : ( 33861, 33862 ),
"DR19aQsx69" : ( 33862, 33863 ),
"DR19aQsx70" : ( 33863, 33864 ),
"DR19aQsx71" : ( 33864, 33865 ),
"DR19aQsx72" : ( 33865, 33866 ),
"DR19aQsx73" : ( 33866, 33867 ),
"DR19aQsx74" : ( 33867, 33868 ),
"DR19aQsx75" : ( 33868, 33869 ),
"DR19aQsx76" : ( 33869, 33870 ),
"DR19SxAgeOns_4" : ( 33870, 33872 ),
"DR19SxOns_4" : ( 33872, 33873 ),
"DR19SxAgeRec_4" : ( 33873, 33875 ),
"DR19SxRec_4" : ( 33875, 33876 ),
"DR5YrCl_4" : ( 33876, 33877 ),
"DR6YrCl_4" : ( 33877, 33878 ),
"DR7YrCl_4" : ( 33878, 33879 ),
"DR7aYrCl_4" : ( 33879, 33880 ),
"DR7bYrCl_4" : ( 33880, 33881 ),
"DR8YrCl_4" : ( 33881, 33882 ),
"DR9aYrCl_4" : ( 33882, 33883 ),
"DR10YrCl_4" : ( 33883, 33884 ),
"DR11bYrCl_4" : ( 33884, 33885 ),
"DR11e2YrCl_4" : ( 33885, 33886 ),
"DR12a2YrCl_4" : ( 33886, 33887 ),
"DR12b1YrCl_4" : ( 33887, 33888 ),
"DR12c1YrCl_4" : ( 33888, 33889 ),
"DR13dYrCl_4" : ( 33889, 33890 ),
"DR14bYrCl_4" : ( 33890, 33891 ),
"DR18aYrCl_4" : ( 33891, 33892 ),
"DR15aYrCl_4" : ( 33892, 33893 ),
"DR16YrCl_4" : ( 33893, 33894 ),
"DR17YrCl_4" : ( 33894, 33895 ),
"DR5MnthCl_4" : ( 33895, 33896 ),
"DR6MnthCl_4" : ( 33896, 33897 ),
"DR7MnthCl_4" : ( 33897, 33898 ),
"DR7aMnthCl_4" : ( 33898, 33899 ),
"DR7bMnthCl_4" : ( 33899, 33900 ),
"DR8MnthCl_4" : ( 33900, 33901 ),
"DR9aMnthCl_4" : ( 33901, 33902 ),
"DR10MnthCl_4" : ( 33902, 33903 ),
"DR11bMnthCl_4" : ( 33903, 33904 ),
"DR11e2MnthCl_4" : ( 33904, 33905 ),
"DR12a2MnthCl_4" : ( 33905, 33906 ),
"DR12b1MnthCl_4" : ( 33906, 33907 ),
"DR12c1MnthCl_4" : ( 33907, 33908 ),
"DR13dMnthCl_4" : ( 33908, 33909 ),
"DR14bMnthCl_4" : ( 33909, 33910 ),
"DR18aMnthCl_4" : ( 33910, 33911 ),
"DR15aMnthCl_4" : ( 33911, 33912 ),
"DR16MnthCl_4" : ( 33912, 33913 ),
"DR17MnthCl_4" : ( 33913, 33914 ),
"DR19AgeOns_4" : ( 33914, 33916 ),
"DR19Ons_4" : ( 33916, 33917 ),
"DR19AgeRec_4" : ( 33917, 33919 ),
"DR19Rec_4" : ( 33919, 33920 ),
"DRSxCount4" : ( 33920, 33922 ),
"DRYrClCount4" : ( 33922, 33924 ),
"DRMnthClCount4" : ( 33924, 33926 ),
"DR19Qsx77" : ( 33926, 33927 ),
"DR19Qsx78" : ( 33927, 33928 ),
"DR19Qsx79" : ( 33928, 33929 ),
"DR19Qsx80" : ( 33929, 33930 ),
"DR19Qsx81" : ( 33930, 33931 ),
"DR19Qsx82" : ( 33931, 33932 ),
"DR19Qsx83" : ( 33932, 33933 ),
"DR19Qsx84" : ( 33933, 33934 ),
"DR19Qsx85" : ( 33934, 33935 ),
"DR19Qsx86" : ( 33935, 33936 ),
"DR19Qsx87" : ( 33936, 33937 ),
"DR19Qsx88" : ( 33937, 33938 ),
"DR19Qsx89" : ( 33938, 33939 ),
"DR19Qsx90" : ( 33939, 33940 ),
"DR19Qsx91" : ( 33940, 33941 ),
"DR19Qsx92" : ( 33941, 33942 ),
"DR19Qsx93" : ( 33942, 33943 ),
"DR19Qsx94" : ( 33943, 33944 ),
"DR19Qsx95" : ( 33944, 33945 ),
"DR19aQsx77" : ( 33945, 33946 ),
"DR19aQsx78" : ( 33946, 33947 ),
"DR19aQsx79" : ( 33947, 33948 ),
"DR19aQsx80" : ( 33948, 33949 ),
"DR19aQsx81" : ( 33949, 33950 ),
"DR19aQsx82" : ( 33950, 33951 ),
"DR19aQsx83" : ( 33951, 33952 ),
"DR19aQsx84" : ( 33952, 33953 ),
"DR19aQsx85" : ( 33953, 33954 ),
"DR19aQsx86" : ( 33954, 33955 ),
"DR19aQsx87" : ( 33955, 33956 ),
"DR19aQsx88" : ( 33956, 33957 ),
"DR19aQsx89" : ( 33957, 33958 ),
"DR19aQsx90" : ( 33958, 33959 ),
"DR19aQsx91" : ( 33959, 33960 ),
"DR19aQsx92" : ( 33960, 33961 ),
"DR19aQsx93" : ( 33961, 33962 ),
"DR19aQsx94" : ( 33962, 33963 ),
"DR19aQsx95" : ( 33963, 33964 ),
"DR19SxAgeOns_5" : ( 33964, 33966 ),
"DR19SxOns_5" : ( 33966, 33967 ),
"DR19SxAgeRec_5" : ( 33967, 33969 ),
"DR19SxRec_5" : ( 33969, 33970 ),
"DR5YrCl_5" : ( 33970, 33971 ),
"DR6YrCl_5" : ( 33971, 33972 ),
"DR7YrCl_5" : ( 33972, 33973 ),
"DR7aYrCl_5" : ( 33973, 33974 ),
"DR7bYrCl_5" : ( 33974, 33975 ),
"DR8YrCl_5" : ( 33975, 33976 ),
"DR9aYrCl_5" : ( 33976, 33977 ),
"DR10YrCl_5" : ( 33977, 33978 ),
"DR11bYrCl_5" : ( 33978, 33979 ),
"DR11e2YrCl_5" : ( 33979, 33980 ),
"DR12a2YrCl_5" : ( 33980, 33981 ),
"DR12b1YrCl_5" : ( 33981, 33982 ),
"DR12c1YrCl_5" : ( 33982, 33983 ),
"DR13dYrCl_5" : ( 33983, 33984 ),
"DR14bYrCl_5" : ( 33984, 33985 ),
"DR18aYrCl_5" : ( 33985, 33986 ),
"DR15aYrCl_5" : ( 33986, 33987 ),
"DR16YrCl_5" : ( 33987, 33988 ),
"DR17YrCl_5" : ( 33988, 33989 ),
"DR5MnthCl_5" : ( 33989, 33990 ),
"DR6MnthCl_5" : ( 33990, 33991 ),
"DR7MnthCl_5" : ( 33991, 33992 ),
"DR7aMnthCl_5" : ( 33992, 33993 ),
"DR7bMnthCl_5" : ( 33993, 33994 ),
"DR8MnthCl_5" : ( 33994, 33995 ),
"DR9aMnthCl_5" : ( 33995, 33996 ),
"DR10MnthCl_5" : ( 33996, 33997 ),
"DR11bMnthCl_5" : ( 33997, 33998 ),
"DR11e2MnthCl_5" : ( 33998, 33999 ),
"DR12a2MnthCl_5" : ( 33999, 34000 ),
"DR12b1MnthCl_5" : ( 34000, 34001 ),
"DR12c1MnthCl_5" : ( 34001, 34002 ),
"DR13dMnthCl_5" : ( 34002, 34003 ),
"DR14bMnthCl_5" : ( 34003, 34004 ),
"DR18aMnthCl_5" : ( 34004, 34005 ),
"DR15aMnthCl_5" : ( 34005, 34006 ),
"DR16MnthCl_5" : ( 34006, 34007 ),
"DR17MnthCl_5" : ( 34007, 34008 ),
"DR19AgeOns_5" : ( 34008, 34010 ),
"DR19Ons_5" : ( 34010, 34011 ),
"DR19AgeRec_5" : ( 34011, 34013 ),
"DR19Rec_5" : ( 34013, 34014 ),
"DRSxCount5" : ( 34014, 34016 ),
"DRYrClCount5" : ( 34016, 34018 ),
"DRMnthClCount5" : ( 34018, 34020 ),
"DR22FromMnth" : ( 34020, 34022 ),
"DR22FromYr" : ( 34022, 34026 ),
"DR22ToMnth" : ( 34026, 34028 ),
"DR22ToYR" : ( 34028, 34032 ),
"DR22FromMnth2" : ( 34032, 34034 ),
"DR22FromYr2" : ( 34034, 34038 ),
"DR22ToMnth2" : ( 34038, 34040 ),
"DR22ToYR2" : ( 34040, 34044 ),
"DR22FromMnth3" : ( 34044, 34046 ),
"DR22FromYr3" : ( 34046, 34050 ),
"DR22ToMnth3" : ( 34050, 34052 ),
"DR22ToYR3" : ( 34052, 34056 ),
"DR22FromMnth4" : ( 34056, 34058 ),
"DR22FromYr4" : ( 34058, 34062 ),
"DR22ToMnth4" : ( 34062, 34064 ),
"DR22ToYR4" : ( 34064, 34068 ),
"DR22FromMnth5" : ( 34068, 34070 ),
"DR22FromYr5" : ( 34070, 34074 ),
"DR22ToMnth5" : ( 34074, 34076 ),
"DR22ToYR5" : ( 34076, 34080 ),
"DR1a_10" : ( 34080, 34084 ),
"DR1a1_10" : ( 34084, 34085 ),
"DR1a2_10" : ( 34085, 34086 ),
"DR1bAgeOns_10" : ( 34086, 34088 ),
"DR1bOns_10" : ( 34088, 34089 ),
"DR1bAgeRec_10" : ( 34089, 34091 ),
"DR1bRec_10" : ( 34091, 34092 ),
"DR1c_10" : ( 34092, 34093 ),
"DR1a_11" : ( 34093, 34097 ),
"DR1a1_11" : ( 34097, 34098 ),
"DR1a2_11" : ( 34098, 34099 ),
"DR1bAgeOns_11" : ( 34099, 34101 ),
"DR1bOns_11" : ( 34101, 34102 ),
"DR1bAgeRec_11" : ( 34102, 34104 ),
"DR1bRec_11" : ( 34104, 34105 ),
"DR1c_11" : ( 34105, 34106 ),
"DR1a_12" : ( 34106, 34110 ),
"DR1a1_12" : ( 34110, 34111 ),
"DR1a2_12" : ( 34111, 34112 ),
"DR1bAgeOns_12" : ( 34112, 34114 ),
"DR1bOns_12" : ( 34114, 34115 ),
"DR1bAgeRec_12" : ( 34115, 34117 ),
"DR1bRec_12" : ( 34117, 34118 ),
"DR1c_12" : ( 34118, 34119 ),
"DR1a_13" : ( 34119, 34123 ),
"DR1a1_13" : ( 34123, 34124 ),
"DR1a2_13" : ( 34124, 34125 ),
"DR1bAgeOns_13" : ( 34125, 34127 ),
"DR1bOns_13" : ( 34127, 34128 ),
"DR1bAgeRec_13" : ( 34128, 34130 ),
"DR1bRec_13" : ( 34130, 34131 ),
"DR1c_13" : ( 34131, 34132 ),
"DR1a_14" : ( 34132, 34136 ),
"DR1a1_14" : ( 34136, 34137 ),
"DR1a2_14" : ( 34137, 34138 ),
"DR1bAgeOns_14" : ( 34138, 34140 ),
"DR1bOns_14" : ( 34140, 34141 ),
"DR1bAgeRec_14" : ( 34141, 34143 ),
"DR1bRec_14" : ( 34143, 34144 ),
"DR1c_14" : ( 34144, 34145 ),
"DR22_" : ( 34145, 34148 ),
"DR22A_" : ( 34148, 34150 ),
"DR22FromMnth6" : ( 34150, 34152 ),
"DR22FromYr6" : ( 34152, 34156 ),
"DR22ToMnth6" : ( 34156, 34158 ),
"DR22ToYR6" : ( 34158, 34162 ),
"DR22FromMnth7" : ( 34162, 34164 ),
"DR22FromYr7" : ( 34164, 34168 ),
"DR22ToMnth7" : ( 34168, 34170 ),
"DR22ToYR7" : ( 34170, 34174 ),
"DR22FromMnth8" : ( 34174, 34176 ),
"DR22FromYr8" : ( 34176, 34180 ),
"DR22ToMnth8" : ( 34180, 34182 ),
"DR22ToYR8" : ( 34182, 34186 ),
"DR22FromMnth9" : ( 34186, 34188 ),
"DR22FromYr9" : ( 34188, 34192 ),
"DR22ToMnth9" : ( 34192, 34194 ),
"DR22ToYR9" : ( 34194, 34198 ),
"DR22FromMnth10" : ( 34198, 34200 ),
"DR22FromYr10" : ( 34200, 34204 ),
"DR22ToMnth10" : ( 34204, 34206 ),
"DR22ToYR10" : ( 34206, 34210 ),
"DR1a_15" : ( 34210, 34214 ),
"DR1a1_15" : ( 34214, 34215 ),
"DR1a2_15" : ( 34215, 34216 ),
"DR1bAgeOns_15" : ( 34216, 34218 ),
"DR1bOns_15" : ( 34218, 34219 ),
"DR1bAgeRec_15" : ( 34219, 34221 ),
"DR1bRec_15" : ( 34221, 34222 ),
"DR1c_15" : ( 34222, 34223 ),
"DR1a_16" : ( 34223, 34227 ),
"DR1a1_16" : ( 34227, 34228 ),
"DR1a2_16" : ( 34228, 34229 ),
"DR1bAgeOns_16" : ( 34229, 34231 ),
"DR1bOns_16" : ( 34231, 34232 ),
"DR1bAgeRec_16" : ( 34232, 34234 ),
"DR1bRec_16" : ( 34234, 34235 ),
"DR1c_16" : ( 34235, 34236 ),
"DR1a_17" : ( 34236, 34240 ),
"DR1a1_17" : ( 34240, 34241 ),
"DR1a2_17" : ( 34241, 34242 ),
"DR1bAgeOns_17" : ( 34242, 34244 ),
"DR1bOns_17" : ( 34244, 34245 ),
"DR1bAgeRec_17" : ( 34245, 34247 ),
"DR1bRec_17" : ( 34247, 34248 ),
"DR1c_17" : ( 34248, 34249 ),
"DR1a_18" : ( 34249, 34253 ),
"DR1a1_18" : ( 34253, 34254 ),
"DR1a2_18" : ( 34254, 34255 ),
"DR1bAgeOns_18" : ( 34255, 34257 ),
"DR1bOns_18" : ( 34257, 34258 ),
"DR1bAgeRec_18" : ( 34258, 34260 ),
"DR1bRec_18" : ( 34260, 34261 ),
"DR1c_18" : ( 34261, 34262 ),
"DR1a_19" : ( 34262, 34266 ),
"DR1a1_19" : ( 34266, 34267 ),
"DR1a2_19" : ( 34267, 34268 ),
"DR1bAgeOns_19" : ( 34268, 34270 ),
"DR1bOns_19" : ( 34270, 34271 ),
"DR1bAgeRec_19" : ( 34271, 34273 ),
"DR1bRec_19" : ( 34273, 34274 ),
"DR1c_19" : ( 34274, 34275 ),
"DR22_2" : ( 34275, 34278 ),
"DR22A_2" : ( 34278, 34280 ),
"DR22FromMnth11" : ( 34280, 34282 ),
"DR22FromYr11" : ( 34282, 34286 ),
"DR22ToMnth11" : ( 34286, 34288 ),
"DR22ToYR11" : ( 34288, 34292 ),
"DR22FromMnth12" : ( 34292, 34294 ),
"DR22FromYr12" : ( 34294, 34298 ),
"DR22ToMnth12" : ( 34298, 34300 ),
"DR22ToYR12" : ( 34300, 34304 ),
"DR22FromMnth13" : ( 34304, 34306 ),
"DR22FromYr13" : ( 34306, 34310 ),
"DR22ToMnth13" : ( 34310, 34312 ),
"DR22ToYR13" : ( 34312, 34316 ),
"DR22FromMnth14" : ( 34316, 34318 ),
"DR22FromYr14" : ( 34318, 34322 ),
"DR22ToMnth14" : ( 34322, 34324 ),
"DR22ToYR14" : ( 34324, 34328 ),
"DR22FromMnth15" : ( 34328, 34330 ),
"DR22FromYr15" : ( 34330, 34334 ),
"DR22ToMnth15" : ( 34334, 34336 ),
"DR22ToYR15" : ( 34336, 34340 ),
"DR1a_20" : ( 34340, 34344 ),
"DR1a1_20" : ( 34344, 34345 ),
"DR1a2_20" : ( 34345, 34346 ),
"DR1bAgeOns_20" : ( 34346, 34348 ),
"DR1bOns_20" : ( 34348, 34349 ),
"DR1bAgeRec_20" : ( 34349, 34351 ),
"DR1bRec_20" : ( 34351, 34352 ),
"DR1c_20" : ( 34352, 34353 ),
"DR1a_21" : ( 34353, 34357 ),
"DR1a1_21" : ( 34357, 34358 ),
"DR1a2_21" : ( 34358, 34359 ),
"DR1bAgeOns_21" : ( 34359, 34361 ),
"DR1bOns_21" : ( 34361, 34362 ),
"DR1bAgeRec_21" : ( 34362, 34364 ),
"DR1bRec_21" : ( 34364, 34365 ),
"DR1c_21" : ( 34365, 34366 ),
"DR1a_22" : ( 34366, 34370 ),
"DR1a1_22" : ( 34370, 34371 ),
"DR1a2_22" : ( 34371, 34372 ),
"DR1bAgeOns_22" : ( 34372, 34374 ),
"DR1bOns_22" : ( 34374, 34375 ),
"DR1bAgeRec_22" : ( 34375, 34377 ),
"DR1bRec_22" : ( 34377, 34378 ),
"DR1c_22" : ( 34378, 34379 ),
"DR1a_23" : ( 34379, 34383 ),
"DR1a1_23" : ( 34383, 34384 ),
"DR1a2_23" : ( 34384, 34385 ),
"DR1bAgeOns_23" : ( 34385, 34387 ),
"DR1bOns_23" : ( 34387, 34388 ),
"DR1bAgeRec_23" : ( 34388, 34390 ),
"DR1bRec_23" : ( 34390, 34391 ),
"DR1c_23" : ( 34391, 34392 ),
"DR1a_24" : ( 34392, 34396 ),
"DR1a1_24" : ( 34396, 34397 ),
"DR1a2_24" : ( 34397, 34398 ),
"DR1bAgeOns_24" : ( 34398, 34400 ),
"DR1bOns_24" : ( 34400, 34401 ),
"DR1bAgeRec_24" : ( 34401, 34403 ),
"DR1bRec_24" : ( 34403, 34404 ),
"DR1c_24" : ( 34404, 34405 ),
"DR22_3" : ( 34405, 34408 ),
"DR22A_3" : ( 34408, 34410 ),
"DR22FromMnth16" : ( 34410, 34412 ),
"DR22FromYr16" : ( 34412, 34416 ),
"DR22ToMnth16" : ( 34416, 34418 ),
"DR22ToYR16" : ( 34418, 34422 ),
"DR22FromMnth17" : ( 34422, 34424 ),
"DR22FromYr17" : ( 34424, 34428 ),
"DR22ToMnth17" : ( 34428, 34430 ),
"DR22ToYR17" : ( 34430, 34434 ),
"DR22FromMnth18" : ( 34434, 34436 ),
"DR22FromYr18" : ( 34436, 34440 ),
"DR22ToMnth18" : ( 34440, 34442 ),
"DR22ToYR18" : ( 34442, 34446 ),
"DR22FromMnth19" : ( 34446, 34448 ),
"DR22FromYr19" : ( 34448, 34452 ),
"DR22ToMnth19" : ( 34452, 34454 ),
"DR22ToYR19" : ( 34454, 34458 ),
"DR22FromMnth20" : ( 34458, 34460 ),
"DR22FromYr20" : ( 34460, 34464 ),
"DR22ToMnth20" : ( 34464, 34466 ),
"DR22ToYR20" : ( 34466, 34470 ),
"DR1a_25" : ( 34470, 34474 ),
"DR1a1_25" : ( 34474, 34475 ),
"DR1a2_25" : ( 34475, 34476 ),
"DR1bAgeOns_25" : ( 34476, 34478 ),
"DR1bOns_25" : ( 34478, 34479 ),
"DR1bAgeRec_25" : ( 34479, 34481 ),
"DR1bRec_25" : ( 34481, 34482 ),
"DR1c_25" : ( 34482, 34483 ),
"DR1a_26" : ( 34483, 34487 ),
"DR1a1_26" : ( 34487, 34488 ),
"DR1a2_26" : ( 34488, 34489 ),
"DR1bAgeOns_26" : ( 34489, 34491 ),
"DR1bOns_26" : ( 34491, 34492 ),
"DR1bAgeRec_26" : ( 34492, 34494 ),
"DR1bRec_26" : ( 34494, 34495 ),
"DR1c_26" : ( 34495, 34496 ),
"DR1a_27" : ( 34496, 34500 ),
"DR1a1_27" : ( 34500, 34501 ),
"DR1a2_27" : ( 34501, 34502 ),
"DR1bAgeOns_27" : ( 34502, 34504 ),
"DR1bOns_27" : ( 34504, 34505 ),
"DR1bAgeRec_27" : ( 34505, 34507 ),
"DR1bRec_27" : ( 34507, 34508 ),
"DR1c_27" : ( 34508, 34509 ),
"DR1a_28" : ( 34509, 34513 ),
"DR1a1_28" : ( 34513, 34514 ),
"DR1a2_28" : ( 34514, 34515 ),
"DR1bAgeOns_28" : ( 34515, 34517 ),
"DR1bOns_28" : ( 34517, 34518 ),
"DR1bAgeRec_28" : ( 34518, 34520 ),
"DR1bRec_28" : ( 34520, 34521 ),
"DR1c_28" : ( 34521, 34522 ),
"DR1a_29" : ( 34522, 34526 ),
"DR1a1_29" : ( 34526, 34527 ),
"DR1a2_29" : ( 34527, 34528 ),
"DR1bAgeOns_29" : ( 34528, 34530 ),
"DR1bOns_29" : ( 34530, 34531 ),
"DR1bAgeRec_29" : ( 34531, 34533 ),
"DR1bRec_29" : ( 34533, 34534 ),
"DR1c_29" : ( 34534, 34535 ),
"DR22_4" : ( 34535, 34538 ),
"DR22A_4" : ( 34538, 34540 ),
"DR22FromMnth21" : ( 34540, 34542 ),
"DR22FromYr21" : ( 34542, 34546 ),
"DR22ToMnth21" : ( 34546, 34548 ),
"DR22ToYR21" : ( 34548, 34552 ),
"DR22FromMnth22" : ( 34552, 34554 ),
"DR22FromYr22" : ( 34554, 34558 ),
"DR22ToMnth22" : ( 34558, 34560 ),
"DR22ToYR22" : ( 34560, 34564 ),
"DR22FromMnth23" : ( 34564, 34566 ),
"DR22FromYr23" : ( 34566, 34570 ),
"DR22ToMnth23" : ( 34570, 34572 ),
"DR22ToYR23" : ( 34572, 34576 ),
"DR22FromMnth24" : ( 34576, 34578 ),
"DR22FromYr24" : ( 34578, 34582 ),
"DR22ToMnth24" : ( 34582, 34584 ),
"DR22ToYR24" : ( 34584, 34588 ),
"DR22FromMnth25" : ( 34588, 34590 ),
"DR22FromYr25" : ( 34590, 34594 ),
"DR22ToMnth25" : ( 34594, 34596 ),
"DR22ToYR25" : ( 34596, 34600 ),
"DR1a_30" : ( 34600, 34604 ),
"DR1a1_30" : ( 34604, 34605 ),
"DR1a2_30" : ( 34605, 34606 ),
"DR1bAgeOns_30" : ( 34606, 34608 ),
"DR1bOns_30" : ( 34608, 34609 ),
"DR1bAgeRec_30" : ( 34609, 34611 ),
"DR1bRec_30" : ( 34611, 34612 ),
"DR1c_30" : ( 34612, 34613 ),
"DR1a_31" : ( 34613, 34617 ),
"DR1a1_31" : ( 34617, 34618 ),
"DR1a2_31" : ( 34618, 34619 ),
"DR1bAgeOns_31" : ( 34619, 34621 ),
"DR1bOns_31" : ( 34621, 34622 ),
"DR1bAgeRec_31" : ( 34622, 34624 ),
"DR1bRec_31" : ( 34624, 34625 ),
"DR1c_31" : ( 34625, 34626 ),
"DR1a_32" : ( 34626, 34630 ),
"DR1a1_32" : ( 34630, 34631 ),
"DR1a2_32" : ( 34631, 34632 ),
"DR1bAgeOns_32" : ( 34632, 34634 ),
"DR1bOns_32" : ( 34634, 34635 ),
"DR1bAgeRec_32" : ( 34635, 34637 ),
"DR1bRec_32" : ( 34637, 34638 ),
"DR1c_32" : ( 34638, 34639 ),
"DR1a_33" : ( 34639, 34643 ),
"DR1a1_33" : ( 34643, 34644 ),
"DR1a2_33" : ( 34644, 34645 ),
"DR1bAgeOns_33" : ( 34645, 34647 ),
"DR1bOns_33" : ( 34647, 34648 ),
"DR1bAgeRec_33" : ( 34648, 34650 ),
"DR1bRec_33" : ( 34650, 34651 ),
"DR1c_33" : ( 34651, 34652 ),
"DR1a_34" : ( 34652, 34656 ),
"DR1a1_34" : ( 34656, 34657 ),
"DR1a2_34" : ( 34657, 34658 ),
"DR1bAgeOns_34" : ( 34658, 34660 ),
"DR1bOns_34" : ( 34660, 34661 ),
"DR1bAgeRec_34" : ( 34661, 34663 ),
"DR1bRec_34" : ( 34663, 34664 ),
"DR1c_34" : ( 34664, 34665 ),
"DR22_5" : ( 34665, 34668 ),
"DR22A_5" : ( 34668, 34670 ),
"DR23" : ( 34670, 34671 ),
"DR23a1" : ( 34671, 34672 ),
"DR23a2" : ( 34672, 34673 ),
"DR23a3" : ( 34673, 34674 ),
"DR23a4" : ( 34674, 34675 ),
"DR23a5" : ( 34675, 34676 ),
"DR23a6" : ( 34676, 34677 ),
"DR23a6_specify" : ( 34677, 34757 ),
"DR23AgeOns" : ( 34757, 34759 ),
"DR23Ons" : ( 34759, 34760 ),
"DR23AgeRec" : ( 34760, 34762 ),
"DR23Rec" : ( 34762, 34763 ),
"DR23c" : ( 34763, 34764 ),
"DR24" : ( 34764, 34765 ),
"DR24a1" : ( 34765, 34766 ),
"DR24a2" : ( 34766, 34767 ),
"DR24a3" : ( 34767, 34768 ),
"DR24a4" : ( 34768, 34769 ),
"DR24a5" : ( 34769, 34770 ),
"DR24a6" : ( 34770, 34771 ),
"DR24a6_Specify" : ( 34771, 34996 ),
"DR24bAgeOns" : ( 34996, 34998 ),
"DR24bOns" : ( 34998, 34999 ),
"DR24bAgeRec" : ( 34999, 35001 ),
"DR24bRec" : ( 35001, 35002 ),
"DR24c" : ( 35002, 35003 ),
"DR24d" : ( 35003, 35004 ),
"DR24dAgeOns" : ( 35004, 35006 ),
"DR24dOns" : ( 35006, 35007 ),
"DR24dAgeRec" : ( 35007, 35009 ),
"DR24dRec" : ( 35009, 35010 ),
"DPSxCount" : ( 35010, 35012 ),
"DP1" : ( 35012, 35013 ),
"DP2" : ( 35013, 35014 ),
"DP2a" : ( 35014, 35015 ),
"DP3" : ( 35015, 35017 ),
"DP3_1" : ( 35017, 35019 ),
"DP3aNum" : ( 35019, 35021 ),
"DP3aUnit" : ( 35021, 35022 ),
"DP3a1" : ( 35022, 35023 ),
"DP3b" : ( 35023, 35025 ),
"DP4a" : ( 35025, 35026 ),
"DP4b" : ( 35026, 35027 ),
"DP4c" : ( 35027, 35028 ),
"DP5" : ( 35028, 35029 ),
"DP6a" : ( 35029, 35030 ),
"DP6a1" : ( 35030, 35031 ),
"DP6b" : ( 35031, 35032 ),
"DP6b1" : ( 35032, 35033 ),
"DP6b2" : ( 35033, 35034 ),
"DP6c" : ( 35034, 35037 ),
"DP6d" : ( 35037, 35040 ),
"DP6Num" : ( 35040, 35042 ),
"DP6Unit" : ( 35042, 35043 ),
"DP7" : ( 35043, 35044 ),
"DP7a" : ( 35044, 35045 ),
"DP7b" : ( 35045, 35046 ),
"DP7c" : ( 35046, 35047 ),
"DP7d" : ( 35047, 35048 ),
"DP7e" : ( 35048, 35049 ),
"DP7f" : ( 35049, 35050 ),
"DP8" : ( 35050, 35051 ),
"DP8a" : ( 35051, 35052 ),
"DP9" : ( 35052, 35053 ),
"DP9a" : ( 35053, 35054 ),
"DP10" : ( 35054, 35055 ),
"DP11" : ( 35055, 35056 ),
"DP12" : ( 35056, 35057 ),
"DP13" : ( 35057, 35058 ),
"DP14" : ( 35058, 35059 ),
"DP15a" : ( 35059, 35060 ),
"DP15b" : ( 35060, 35061 ),
"DP15c" : ( 35061, 35062 ),
"DP15d" : ( 35062, 35063 ),
"DPSxNum01" : ( 35063, 35082 ),
"DPSxNum02" : ( 35082, 35101 ),
"DPSxNum03" : ( 35101, 35120 ),
"DPSxNum04" : ( 35120, 35139 ),
"DPSxNum05" : ( 35139, 35158 ),
"DPSxNum06" : ( 35158, 35177 ),
"DPSxNum07" : ( 35177, 35196 ),
"DPSxNum08" : ( 35196, 35215 ),
"DPSxNum09" : ( 35215, 35234 ),
"DPSxNum10" : ( 35234, 35253 ),
"DPSxNum11" : ( 35253, 35272 ),
"DPSxNum12" : ( 35272, 35291 ),
"DPSxNum13" : ( 35291, 35310 ),
"DPSxNum14" : ( 35310, 35329 ),
"DPSxNum15" : ( 35329, 35348 ),
"DPSxNum16" : ( 35348, 35367 ),
"DPSxNum17" : ( 35367, 35386 ),
"DPSxNum18" : ( 35386, 35405 ),
"DPSxNum19" : ( 35405, 35424 ),
"DPSxNum20" : ( 35424, 35443 ),
"DPSxNum21" : ( 35443, 35462 ),
"DP4aCL" : ( 35462, 35463 ),
"DP4bCL" : ( 35463, 35464 ),
"DP4cCL" : ( 35464, 35465 ),
"DP5CL" : ( 35465, 35466 ),
"DP6aCL" : ( 35466, 35467 ),
"DP6bCL" : ( 35467, 35468 ),
"DP7bCL" : ( 35468, 35469 ),
"DP7cCL" : ( 35469, 35470 ),
"DP7eCL" : ( 35470, 35471 ),
"DP7fCL" : ( 35471, 35472 ),
"DP8aCL" : ( 35472, 35473 ),
"DP9aCL" : ( 35473, 35474 ),
"DP10CL" : ( 35474, 35475 ),
"DP11CL" : ( 35475, 35476 ),
"DP12CL" : ( 35476, 35477 ),
"DP13CL" : ( 35477, 35478 ),
"DP14CL" : ( 35478, 35479 ),
"DP15aCL" : ( 35479, 35480 ),
"DP15bCL" : ( 35480, 35481 ),
"DP15cCL" : ( 35481, 35482 ),
"DP15dCL" : ( 35482, 35483 ),
"DP16a" : ( 35483, 35484 ),
"DP17" : ( 35484, 35485 ),
"DP18" : ( 35485, 35486 ),
"DP18Drug1" : ( 35486, 35566 ),
"DP18Cd1" : ( 35566, 35569 ),
"DP18Another1" : ( 35569, 35570 ),
"DP18DRUG2" : ( 35570, 35650 ),
"DP18Cd2" : ( 35650, 35653 ),
"DP18Another2" : ( 35653, 35654 ),
"DP18DRUG3" : ( 35654, 35734 ),
"DP18Cd3" : ( 35734, 35737 ),
"DP18Another3" : ( 35737, 35738 ),
"DP18DRUG4" : ( 35738, 35818 ),
"DP18Cd4" : ( 35818, 35837 ),
"DP19_1" : ( 35837, 35838 ),
"DP19_2" : ( 35838, 35839 ),
"DP19_3" : ( 35839, 35840 ),
"DP19_4" : ( 35840, 35841 ),
"DP19_5" : ( 35841, 35842 ),
"DP20" : ( 35842, 35843 ),
"DP21" : ( 35843, 35844 ),
"DP21_1" : ( 35844, 35845 ),
"DP21_2" : ( 35845, 35846 ),
"DP21a" : ( 35846, 35849 ),
"DP21a1" : ( 35849, 35850 ),
"DP21b" : ( 35850, 35853 ),
"DP21b1" : ( 35853, 35854 ),
"DP21c" : ( 35854, 35855 ),
"DP22" : ( 35855, 35856 ),
"DP22DRUG1" : ( 35856, 35936 ),
"DP22Cd1" : ( 35936, 35955 ),
"DP22Another1" : ( 35955, 35956 ),
"DP22DRUG2" : ( 35956, 36036 ),
"DP22Cd2" : ( 36036, 36039 ),
"DP22Another2" : ( 36039, 36040 ),
"DP22DRUG3" : ( 36040, 36120 ),
"DP22Cd3" : ( 36120, 36123 ),
"DP22b1" : ( 36123, 36124 ),
"DP22c1" : ( 36124, 36126 ),
"DP22d1" : ( 36126, 36128 ),
"DP22e1" : ( 36128, 36130 ),
"DP22b2" : ( 36130, 36131 ),
"DP22c2" : ( 36131, 36133 ),
"DP22d2" : ( 36133, 36135 ),
"DP22e2" : ( 36135, 36137 ),
"DP22b3" : ( 36137, 36138 ),
"DP22c3" : ( 36138, 36140 ),
"DP22d3" : ( 36140, 36142 ),
"DP22e3" : ( 36142, 36144 ),
"DP23" : ( 36144, 36145 ),
"DP23DRUG1" : ( 36145, 36225 ),
"DP23Cd1" : ( 36225, 36228 ),
"DP23Another" : ( 36228, 36229 ),
"DP23DRUG2" : ( 36229, 36309 ),
"DP23Cd2" : ( 36309, 36312 ),
"DP24" : ( 36312, 36313 ),
"DP24a" : ( 36313, 36393 ),
"DP24_mo" : ( 36393, 36395 ),
"DP24_YR" : ( 36395, 36399 ),
"DP25" : ( 36399, 36400 ),
"DP25SPECIFY" : ( 36400, 36480 ),
"DP25CODE" : ( 36480, 36499 ),
"DP26" : ( 36499, 36500 ),
"DP26a" : ( 36500, 36501 ),
"DP27SxCount" : ( 36501, 36503 ),
"DP27x" : ( 36503, 36506 ),
"DPx_ao27" : ( 36506, 36508 ),
"DPx_ar27" : ( 36508, 36510 ),
"DPx_r27" : ( 36510, 36511 ),
"DP27x1" : ( 36511, 36513 ),
"DP27" : ( 36513, 36514 ),
"DP27a" : ( 36514, 36516 ),
"DP27a1" : ( 36516, 36518 ),
"DP27b1" : ( 36518, 36519 ),
"DP27b2" : ( 36519, 36520 ),
"DP27b3" : ( 36520, 36521 ),
"DP27b4" : ( 36521, 36522 ),
"DP27b4a" : ( 36522, 36523 ),
"DP27b5" : ( 36523, 36524 ),
"DP27b5a" : ( 36524, 36525 ),
"DP27b5b" : ( 36525, 36526 ),
"DP27b5d" : ( 36526, 36529 ),
"DP27b5e" : ( 36529, 36532 ),
"DP27b5Num" : ( 36532, 36534 ),
"DP27b5Unit" : ( 36534, 36535 ),
"DP27b6" : ( 36535, 36536 ),
"DP27b7" : ( 36536, 36537 ),
"DP27b8" : ( 36537, 36538 ),
"DP27b9" : ( 36538, 36539 ),
"DP27b10" : ( 36539, 36540 ),
"DP27b11" : ( 36540, 36541 ),
"DP27b12" : ( 36541, 36542 ),
"DP27b13" : ( 36542, 36543 ),
"DP27b14" : ( 36543, 36544 ),
"DP27c" : ( 36544, 36545 ),
"DP27dMo" : ( 36545, 36547 ),
"DP27dYr" : ( 36547, 36551 ),
"DP28Num" : ( 36551, 36553 ),
"DP28UNIT" : ( 36553, 36554 ),
"DP30_1" : ( 36554, 36555 ),
"DP30_2" : ( 36555, 36556 ),
"DP30_3" : ( 36556, 36557 ),
"DP30_4" : ( 36557, 36558 ),
"DP30_5" : ( 36558, 36559 ),
"DP31" : ( 36559, 36560 ),
"DP31a" : ( 36560, 36561 ),
"DP31b1" : ( 36561, 36562 ),
"DP31b2" : ( 36562, 36563 ),
"DP31b3" : ( 36563, 36564 ),
"DP31b4" : ( 36564, 36565 ),
"DP31b5" : ( 36565, 36566 ),
"DP_ao31" : ( 36566, 36568 ),
"DP_ar31" : ( 36568, 36570 ),
"DP_r31" : ( 36570, 36571 ),
"DP32" : ( 36571, 36572 ),
"DP33" : ( 36572, 36573 ),
"DP33DRUG1" : ( 36573, 36653 ),
"DP33Cd1" : ( 36653, 36656 ),
"DP33Another1" : ( 36656, 36657 ),
"DP33DRUG2" : ( 36657, 36737 ),
"DP33Cd2" : ( 36737, 36740 ),
"DP33Another2" : ( 36740, 36741 ),
"DP33DRUG3" : ( 36741, 36821 ),
"DP33Cd3" : ( 36821, 36824 ),
"DP34" : ( 36824, 36825 ),
"DP34Num" : ( 36825, 36827 ),
"DP34Unit" : ( 36827, 36828 ),
"PS1" : ( 36828, 36829 ),
"PS1_Specify" : ( 36829, 37054 ),
"PS2" : ( 37054, 37055 ),
"PS2_Specify" : ( 37055, 37280 ),
"PS5" : ( 37280, 37281 ),
"PS5_Specify" : ( 37281, 37506 ),
"PS8" : ( 37506, 37507 ),
"PS8_Specify" : ( 37507, 37732 ),
"IND_ID4" : ( 37732, 37741 ),
"AS1" : ( 37741, 37742 ),
"AS1a" : ( 37742, 37743 ),
"AS_ao1" : ( 37743, 37745 ),
"AS_ao1DK" : ( 37745, 37746 ),
"AS2a" : ( 37746, 37747 ),
"AS2b" : ( 37747, 37748 ),
"ASa_ao2" : ( 37748, 37750 ),
"ASb_ao2" : ( 37750, 37752 ),
"ASa_ao2DK" : ( 37752, 37753 ),
"ASb_ao2DK" : ( 37753, 37754 ),
"AS3" : ( 37754, 37755 ),
"AS3a" : ( 37755, 37756 ),
"AS3b" : ( 37756, 37757 ),
"ASb_ao3" : ( 37757, 37759 ),
"ASb_ao3DK" : ( 37759, 37760 ),
"ASb_ar3" : ( 37760, 37762 ),
"AS3c" : ( 37762, 37763 ),
"AS3c1" : ( 37763, 37766 ),
"ASc_ao3" : ( 37766, 37768 ),
"ASc_ao3DK" : ( 37768, 37769 ),
"AS4" : ( 37769, 37770 ),
"AS4a" : ( 37770, 37771 ),
"AS_ao4" : ( 37771, 37773 ),
"AS_ao4DK" : ( 37773, 37774 ),
"AS5" : ( 37774, 37775 ),
"AS5a" : ( 37775, 37776 ),
"AS_ao5" : ( 37776, 37778 ),
"AS_ao5DK" : ( 37778, 37779 ),
"AS6" : ( 37779, 37780 ),
"ASa1_ao6" : ( 37780, 37782 ),
"ASa1_ao6DK" : ( 37782, 37783 ),
"ASa2_ao6" : ( 37783, 37785 ),
"ASa2_ao6DK" : ( 37785, 37786 ),
"ASa_ar6" : ( 37786, 37788 ),
"AS6b" : ( 37788, 37789 ),
"ASc1_ao6" : ( 37789, 37791 ),
"ASc1_ao6DK" : ( 37791, 37792 ),
"ASc2_ao6" : ( 37792, 37794 ),
"ASc2_ao6DK" : ( 37794, 37795 ),
"ASc_ar6" : ( 37795, 37797 ),
"AS6d" : ( 37797, 37798 ),
"ASe1_ao6" : ( 37798, 37800 ),
"ASe1_ao6DK" : ( 37800, 37801 ),
"ASe2_ao6" : ( 37801, 37803 ),
"ASe2_ao6DK" : ( 37803, 37804 ),
"ASe_ar6" : ( 37804, 37806 ),
"AS7" : ( 37806, 37807 ),
"AS_ao7" : ( 37807, 37809 ),
"AS_ao7DK" : ( 37809, 37810 ),
"AS8" : ( 37810, 37811 ),
"AS_ao8" : ( 37811, 37813 ),
"AS_ao8DK" : ( 37813, 37814 ),
"AS9" : ( 37814, 37815 ),
"AS_ao9" : ( 37815, 37817 ),
"AS_ao9DK" : ( 37817, 37818 ),
"AS_ar9" : ( 37818, 37820 ),
"AS10" : ( 37820, 37821 ),
"AS10_SPECIFY" : ( 37821, 38046 ),
"AS10a" : ( 38046, 38048 ),
"AS_ao10" : ( 38048, 38050 ),
"AS_ao10DK" : ( 38050, 38051 ),
"AS_ar10" : ( 38051, 38053 ),
"AS11" : ( 38053, 38054 ),
"AS11a" : ( 38054, 38055 ),
"AS11b" : ( 38055, 38056 ),
"AS11b1" : ( 38056, 38057 ),
"AS1_ao11" : ( 38057, 38059 ),
"AS1_ao11DK" : ( 38059, 38060 ),
"AS2_ao11" : ( 38060, 38062 ),
"AS2_ao11DK" : ( 38062, 38063 ),
"AS_ar11" : ( 38063, 38065 ),
"AS12" : ( 38065, 38066 ),
"AS1_ao12" : ( 38066, 38068 ),
"AS1_ao12DK" : ( 38068, 38069 ),
"AS2_ao12" : ( 38069, 38071 ),
"AS2_ao12DK" : ( 38071, 38072 ),
"AS_ar12" : ( 38072, 38074 ),
"AS13" : ( 38074, 38075 ),
"AS13a" : ( 38075, 38076 ),
"AS1_ao13" : ( 38076, 38078 ),
"AS1_ao13DK" : ( 38078, 38079 ),
"AS2_ao13" : ( 38079, 38081 ),
"AS2_ao13DK" : ( 38081, 38082 ),
"AS_ar13" : ( 38082, 38084 ),
"AS14" : ( 38084, 38085 ),
"ASa1_ao14" : ( 38085, 38087 ),
"ASa2_ao14" : ( 38087, 38089 ),
"ASa1_ao14DK" : ( 38089, 38090 ),
"ASa2_ao14DK" : ( 38090, 38091 ),
"ASa_ar14" : ( 38091, 38093 ),
"AS14b" : ( 38093, 38094 ),
"ASc1_ao14" : ( 38094, 38096 ),
"ASc2_ao14" : ( 38096, 38098 ),
"ASc1_ao14DK" : ( 38098, 38099 ),
"ASc2_ao14DK" : ( 38099, 38100 ),
"ASc_ar14" : ( 38100, 38102 ),
"AS14d" : ( 38102, 38103 ),
"ASe1_ao14" : ( 38103, 38105 ),
"ASe2_ao14" : ( 38105, 38107 ),
"ASe1_ao14DK" : ( 38107, 38108 ),
"ASe2_ao14DK" : ( 38108, 38109 ),
"ASe_ar14" : ( 38109, 38111 ),
"AS14f" : ( 38111, 38112 ),
"AS15" : ( 38112, 38113 ),
"AS1_ao15" : ( 38113, 38115 ),
"AS1_ao15DK" : ( 38115, 38116 ),
"AS2_ao15" : ( 38116, 38118 ),
"AS2_ao15DK" : ( 38118, 38119 ),
"AS_ar15" : ( 38119, 38121 ),
"AS15b" : ( 38121, 38122 ),
"AS16" : ( 38122, 38123 ),
"AS1_ao16" : ( 38123, 38125 ),
"AS1_ao16DK" : ( 38125, 38126 ),
"AS2_ao16" : ( 38126, 38128 ),
"AS2_ao16DK" : ( 38128, 38129 ),
"AS_ar16" : ( 38129, 38131 ),
"AS16b" : ( 38131, 38132 ),
"AS17" : ( 38132, 38133 ),
"AS17a" : ( 38133, 38134 ),
"AS1_ao17" : ( 38134, 38136 ),
"AS1_ao17DK" : ( 38136, 38137 ),
"AS2_ao17" : ( 38137, 38139 ),
"AS2_ao17DK" : ( 38139, 38140 ),
"AS_ar17" : ( 38140, 38142 ),
"AS17c" : ( 38142, 38143 ),
"AS18" : ( 38143, 38144 ),
"AS18_SPECIFY" : ( 38144, 38369 ),
"AS1_ao18" : ( 38369, 38371 ),
"AS1_ao18DK" : ( 38371, 38372 ),
"AS2_ao18" : ( 38372, 38374 ),
"AS2_ao18DK" : ( 38374, 38375 ),
"AS_ar18" : ( 38375, 38377 ),
"AS_ar18DK" : ( 38377, 38378 ),
"AS18b" : ( 38378, 38379 ),
"AS18c" : ( 38379, 38380 ),
"AS18d" : ( 38380, 38381 ),
"AS19" : ( 38381, 38382 ),
"AS19_SPECIFY" : ( 38382, 38607 ),
"AS1_ao19" : ( 38607, 38609 ),
"AS1_ao19DK" : ( 38609, 38610 ),
"AS2_ao19" : ( 38610, 38612 ),
"AS2_ao19DK" : ( 38612, 38613 ),
"AS_ar19" : ( 38613, 38615 ),
"AS20" : ( 38615, 38616 ),
"AS1_ao20" : ( 38616, 38618 ),
"AS1_ao20DK" : ( 38618, 38619 ),
"AS2_ao20" : ( 38619, 38621 ),
"AS2_ao20DK" : ( 38621, 38622 ),
"AS_ar20" : ( 38622, 38624 ),
"AS21" : ( 38624, 38625 ),
"AS1_ao21" : ( 38625, 38627 ),
"AS1_ao21DK" : ( 38627, 38628 ),
"AS2_ao21" : ( 38628, 38630 ),
"AS2_ao21DK" : ( 38630, 38631 ),
"AS_ar21" : ( 38631, 38633 ),
"AS22" : ( 38633, 38634 ),
"AS22Qsx" : ( 38634, 38635 ),
"AS22Qsx2" : ( 38635, 38636 ),
"AS22Qsx3" : ( 38636, 38637 ),
"AS22Qsx4" : ( 38637, 38638 ),
"AS22Qsx5" : ( 38638, 38639 ),
"AS22Qsx6" : ( 38639, 38640 ),
"AS22Qsx7" : ( 38640, 38641 ),
"AS22Qsx8" : ( 38641, 38642 ),
"AS22Qsx9" : ( 38642, 38643 ),
"AS22Qsx10" : ( 38643, 38644 ),
"AS22Qsx11" : ( 38644, 38645 ),
"AS22Qsx12" : ( 38645, 38646 ),
"AS22Qsx13" : ( 38646, 38647 ),
"AS22Qsx14" : ( 38647, 38648 ),
"AS22Qsx15" : ( 38648, 38649 ),
"AS22Qsx16" : ( 38649, 38650 ),
"AS22Qsx17" : ( 38650, 38651 ),
"AS22Qsx18" : ( 38651, 38652 ),
"AS22Qsx19" : ( 38652, 38653 ),
"AS22Qsx20" : ( 38653, 38654 ),
"AS22Qsx21" : ( 38654, 38655 ),
"AS_ao22" : ( 38655, 38657 ),
"AS_ao22DK" : ( 38657, 38658 ),
"AS_ar22" : ( 38658, 38660 ),
"AS1b_C" : ( 38660, 38661 ),
"AS3b_C" : ( 38661, 38662 ),
"AS3c_C" : ( 38662, 38663 ),
"AS3c1_C" : ( 38663, 38664 ),
"AS4b_C" : ( 38664, 38665 ),
"AS5b_C" : ( 38665, 38666 ),
"AS6_C" : ( 38666, 38667 ),
"AS9_C" : ( 38667, 38668 ),
"AS10_C" : ( 38668, 38669 ),
"AS11_C" : ( 38669, 38670 ),
"AS13_C" : ( 38670, 38671 ),
"AS14_C" : ( 38671, 38672 ),
"AS14b_C" : ( 38672, 38673 ),
"AS14d_C" : ( 38673, 38674 ),
"AS15_C" : ( 38674, 38675 ),
"AS16_C" : ( 38675, 38676 ),
"AS17a_C" : ( 38676, 38677 ),
"AS18_C" : ( 38677, 38678 ),
"AS19_C" : ( 38678, 38679 ),
"AS20_C" : ( 38679, 38680 ),
"AS21_C" : ( 38680, 38681 ),
"AS23_1" : ( 38681, 38682 ),
"AS23_2" : ( 38682, 38683 ),
"AS23_3" : ( 38683, 38684 ),
"AS23_3a" : ( 38684, 38685 ),
"AS23_4" : ( 38685, 38686 ),
"AS23a" : ( 38686, 38687 ),
"AS23a_Specify" : ( 38687, 38912 ),
"AS23b" : ( 38912, 38913 ),
"AS1_ao23" : ( 38913, 38915 ),
"AS1_ao23DK" : ( 38915, 38916 ),
"AS2_ao23" : ( 38916, 38918 ),
"AS2_ao23DK" : ( 38918, 38919 ),
"AS_ar23" : ( 38919, 38921 ),
"AS24" : ( 38921, 38922 ),
"AS1_ao24" : ( 38922, 38924 ),
"AS1_ao24DK" : ( 38924, 38925 ),
"AS2_ao24" : ( 38925, 38927 ),
"AS2_ao24DK" : ( 38927, 38928 ),
"AS_ar24" : ( 38928, 38930 ),
"AS25" : ( 38930, 38931 ),
"AS25a" : ( 38931, 38932 ),
"AS25b" : ( 38932, 38933 ),
"AS25c" : ( 38933, 38934 ),
"AS25d" : ( 38934, 38935 ),
"AS25e" : ( 38935, 38936 ),
"AS1_ao25" : ( 38936, 38938 ),
"AS1_ao25DK" : ( 38938, 38939 ),
"AS2_ao25" : ( 38939, 38941 ),
"AS2_ao25DK" : ( 38941, 38942 ),
"AS_ar25" : ( 38942, 38944 ),
"AS26" : ( 38944, 38945 ),
"AS1_ao26" : ( 38945, 38947 ),
"AS1_ao26DK" : ( 38947, 38948 ),
"AS2_ao26" : ( 38948, 38950 ),
"AS2_ao26DK" : ( 38950, 38951 ),
"AS_ar26" : ( 38951, 38953 ),
"AS27" : ( 38953, 38954 ),
"AS1_ao27" : ( 38954, 38956 ),
"AS1_ao27DK" : ( 38956, 38957 ),
"AS2_ao27" : ( 38957, 38959 ),
"AS2_ao27DK" : ( 38959, 38960 ),
"AS_ar27" : ( 38960, 38962 ),
"AS28" : ( 38962, 38963 ),
"As28a" : ( 38963, 38965 ),
"As28a1" : ( 38965, 38966 ),
"AS1_ao28" : ( 38966, 38968 ),
"AS1_ao28DK" : ( 38968, 38969 ),
"AS2_ao28" : ( 38969, 38971 ),
"AS2_ao28DK" : ( 38971, 38972 ),
"AS_ar28" : ( 38972, 38974 ),
"AS29" : ( 38974, 38975 ),
"AS29_SPECIFY" : ( 38975, 39200 ),
"AS1_ao29" : ( 39200, 39202 ),
"AS1_ao29DK" : ( 39202, 39203 ),
"AS2_ao29" : ( 39203, 39205 ),
"AS2_ao29DK" : ( 39205, 39206 ),
"AS_ar29" : ( 39206, 39208 ),
"AS29b" : ( 39208, 39210 ),
"AS29c" : ( 39210, 39211 ),
"AS29d" : ( 39211, 39212 ),
"AS29e" : ( 39212, 39213 ),
"AS30" : ( 39213, 39214 ),
"AS30a" : ( 39214, 39215 ),
"AS31" : ( 39215, 39216 ),
"AS31_specify" : ( 39216, 39441 ),
"AS1_ao31" : ( 39441, 39443 ),
"AS1_ao31DK" : ( 39443, 39444 ),
"AS2_ao31" : ( 39444, 39446 ),
"AS2_ao31DK" : ( 39446, 39447 ),
"AS_ar31" : ( 39447, 39449 ),
"AS31c" : ( 39449, 39450 ),
"AS32" : ( 39450, 39451 ),
"AS32a" : ( 39451, 39452 ),
"As32b" : ( 39452, 39453 ),
"AS33" : ( 39453, 39454 ),
"As33b" : ( 39454, 39455 ),
"AS_ao33" : ( 39455, 39457 ),
"AS_ao33DK" : ( 39457, 39458 ),
"AS_ar33" : ( 39458, 39460 ),
"AS34" : ( 39460, 39461 ),
"AS35" : ( 39461, 39463 ),
"AS35a" : ( 39463, 39466 ),
"AS35b" : ( 39466, 39467 ),
"AS36" : ( 39467, 39468 ),
"AS36b" : ( 39468, 39469 ),
"AS36a" : ( 39469, 39470 ),
"AS37" : ( 39470, 39471 ),
"AS38" : ( 39471, 39472 ),
"AS38_SPECIFY" : ( 39472, 39697 ),
"AS38a" : ( 39697, 39698 ),
"AS38a_SPECIFY" : ( 39698, 39923 ),
"AS1_ao38" : ( 39923, 39925 ),
"AS1_ao38DK" : ( 39925, 39926 ),
"AS2_ao38" : ( 39926, 39928 ),
"AS2_ao38DK" : ( 39928, 39929 ),
"AS_ar38" : ( 39929, 39931 ),
"AS39" : ( 39931, 39932 ),
"AS39b" : ( 39932, 39933 ),
"AS1_ao39" : ( 39933, 39935 ),
"AS1_ao39DK" : ( 39935, 39936 ),
"AS2_ao39" : ( 39936, 39938 ),
"AS2_ao39DK" : ( 39938, 39939 ),
"AS_ar39" : ( 39939, 39941 ),
"AS_ar39DK" : ( 39941, 39942 ),
"AS40" : ( 39942, 39943 ),
"AS41" : ( 39943, 39944 ),
"AS42" : ( 39944, 39945 ),
"AS_ar43" : ( 39945, 39947 ),
"AS_r43" : ( 39947, 39948 ),
"AS43a" : ( 39948, 39949 ),
"AS43b" : ( 39949, 39950 ),
"AD1_1" : ( 39950, 39951 ),
"AD1a1" : ( 39951, 39952 ),
"AD1_2" : ( 39952, 39953 ),
"AD1_3" : ( 39953, 39954 ),
"AD1_4" : ( 39954, 39955 ),
"AD1_5" : ( 39955, 39956 ),
"AD1_6" : ( 39956, 39957 ),
"AD1a6" : ( 39957, 39958 ),
"AD1_7" : ( 39958, 39959 ),
"AD1_8" : ( 39959, 39960 ),
"AD1_9" : ( 39960, 39961 ),
"AD1_10" : ( 39961, 39962 ),
"AD2_1" : ( 39962, 39963 ),
"AD2_2" : ( 39963, 39964 ),
"AD2_3" : ( 39964, 39965 ),
"Ad2_4" : ( 39965, 39966 ),
"AD2a" : ( 39966, 39967 ),
"AD_ao3" : ( 39967, 39969 ),
"AD_ao3DK" : ( 39969, 39971 ),
"AD_ar3" : ( 39971, 39973 ),
"AD_r3" : ( 39973, 39974 ),
"AD3a" : ( 39974, 39975 ),
"AD4" : ( 39975, 39976 ),
"AD5" : ( 39976, 39977 ),
"AD_ao5" : ( 39977, 39979 ),
"AD5Drug1" : ( 39979, 40059 ),
"AD5cd1" : ( 40059, 40062 ),
"AD5Another1" : ( 40062, 40063 ),
"AD5Drug2" : ( 40063, 40143 ),
"AD5cd2" : ( 40143, 40146 ),
"AD5Another2" : ( 40146, 40147 ),
"AD5Drug3" : ( 40147, 40227 ),
"AD5cd3" : ( 40227, 40230 ),
"AD5_dk" : ( 40230, 40232 ),
"AD5a" : ( 40232, 40233 ),
"AD5b" : ( 40233, 40235 ),
"AD5c" : ( 40235, 40236 ),
"AD6_1" : ( 40236, 40237 ),
"AD6_2" : ( 40237, 40238 ),
"AD6a2" : ( 40238, 40239 ),
"AD6_3" : ( 40239, 40240 ),
"AD6_4" : ( 40240, 40241 ),
"AD6_5" : ( 40241, 40242 ),
"AD6_6" : ( 40242, 40243 ),
"AD6a6" : ( 40243, 40244 ),
"AD6_7" : ( 40244, 40245 ),
"AD6_8" : ( 40245, 40246 ),
"AD6_9" : ( 40246, 40247 ),
"AD6_10" : ( 40247, 40248 ),
"AD6_11" : ( 40248, 40249 ),
"AD7_1" : ( 40249, 40250 ),
"AD7_2" : ( 40250, 40251 ),
"AD7_3" : ( 40251, 40252 ),
"AD7_4" : ( 40252, 40253 ),
"AD7a" : ( 40253, 40254 ),
"AD_ao8" : ( 40254, 40256 ),
"AD_ar8" : ( 40256, 40258 ),
"AD_r8" : ( 40258, 40259 ),
"AD8a" : ( 40259, 40260 ),
"AD9" : ( 40260, 40261 ),
"AD10" : ( 40261, 40262 ),
"AD_ao10" : ( 40262, 40264 ),
"AD10Drug1" : ( 40264, 40344 ),
"AD10cd1" : ( 40344, 40347 ),
"AD10Another1" : ( 40347, 40348 ),
"AD10Drug2" : ( 40348, 40428 ),
"AD10cd2" : ( 40428, 40431 ),
"AD10Another2" : ( 40431, 40432 ),
"AD10Drug3" : ( 40432, 40512 ),
"AD10cd3" : ( 40512, 40515 ),
"AD10_dk" : ( 40515, 40517 ),
"AD10a" : ( 40517, 40518 ),
"AD10b" : ( 40518, 40520 ),
"AD10c" : ( 40520, 40521 ),
"PT1" : ( 40521, 40522 ),
"PT1a1" : ( 40522, 40523 ),
"PT1a1Age" : ( 40523, 40525 ),
"PT1a2" : ( 40525, 40526 ),
"PT1a2Age" : ( 40526, 40528 ),
"PT1a3" : ( 40528, 40529 ),
"PT1a3Age" : ( 40529, 40531 ),
"PT1a4" : ( 40531, 40532 ),
"PT1a4Age" : ( 40532, 40534 ),
"PT1a5" : ( 40534, 40535 ),
"PT1a5Age" : ( 40535, 40537 ),
"PT1a6" : ( 40537, 40538 ),
"PT1a6Age" : ( 40538, 40540 ),
"PT1a7" : ( 40540, 40541 ),
"PT1a7Age" : ( 40541, 40543 ),
"PT1a8" : ( 40543, 40544 ),
"PT1a8Age" : ( 40544, 40546 ),
"PT1a9" : ( 40546, 40547 ),
"PT1a9Age" : ( 40547, 40549 ),
"PT1a10" : ( 40549, 40550 ),
"PT1a10Age" : ( 40550, 40552 ),
"PT1a11" : ( 40552, 40553 ),
"PT1a11Age" : ( 40553, 40555 ),
"PT1a12" : ( 40555, 40556 ),
"PT1a12Age" : ( 40556, 40558 ),
"PT1a13" : ( 40558, 40559 ),
"PT1a13Age" : ( 40559, 40561 ),
"PT1a14" : ( 40561, 40562 ),
"PT1a14Age" : ( 40562, 40564 ),
"PT1a15" : ( 40564, 40565 ),
"PT1a15Age" : ( 40565, 40567 ),
"PT1a16" : ( 40567, 40568 ),
"PT1a16Age" : ( 40568, 40570 ),
"PT1a17" : ( 40570, 40571 ),
"PT1a17Age" : ( 40571, 40573 ),
"PT1a18" : ( 40573, 40574 ),
"PT1a18Age" : ( 40574, 40576 ),
"PT1a19" : ( 40576, 40577 ),
"PT1a19Age" : ( 40577, 40579 ),
"PT1a19_Specify" : ( 40579, 40659 ),
"PT1a20" : ( 40659, 40660 ),
"PT1a20Age" : ( 40660, 40662 ),
"PT1a20_Specify" : ( 40662, 40742 ),
"PT1b" : ( 40742, 40743 ),
"PT1bEVENT" : ( 40743, 40745 ),
"PT2" : ( 40745, 40747 ),
"PT3" : ( 40747, 40748 ),
"PT4" : ( 40748, 40749 ),
"PT5" : ( 40749, 40750 ),
"PT6" : ( 40750, 40751 ),
"PT7" : ( 40751, 40752 ),
"PT8a" : ( 40752, 40753 ),
"PT8b" : ( 40753, 40754 ),
"PT9" : ( 40754, 40755 ),
"PT10" : ( 40755, 40756 ),
"PT11" : ( 40756, 40757 ),
"PT11a" : ( 40757, 40758 ),
"PT12" : ( 40758, 40759 ),
"PT13" : ( 40759, 40760 ),
"PT14" : ( 40760, 40761 ),
"PT15" : ( 40761, 40762 ),
"PT16a" : ( 40762, 40763 ),
"PT16b" : ( 40763, 40764 ),
"PT17" : ( 40764, 40765 ),
"PT18" : ( 40765, 40766 ),
"PT19" : ( 40766, 40767 ),
"PT20" : ( 40767, 40768 ),
"PT21" : ( 40768, 40769 ),
"PT22a" : ( 40769, 40770 ),
"PT22b" : ( 40770, 40771 ),
"PT23" : ( 40771, 40772 ),
"PT23a1" : ( 40772, 40773 ),
"PT23a3" : ( 40773, 40774 ),
"PT23b" : ( 40774, 40775 ),
"PT23c" : ( 40775, 40776 ),
"PT23d" : ( 40776, 40777 ),
"PT23f" : ( 40777, 40778 ),
"PT23g" : ( 40778, 40779 ),
"PT23h" : ( 40779, 40780 ),
"PT23i" : ( 40780, 40781 ),
"PT23j" : ( 40781, 40782 ),
"PT23Probe" : ( 40782, 40783 ),
"PT24" : ( 40783, 40784 ),
"PT24a" : ( 40784, 40785 ),
"PT24b" : ( 40785, 40786 ),
"PT25_NUM" : ( 40786, 40788 ),
"PT25_UNIT" : ( 40788, 40789 ),
"PT26_NUM" : ( 40789, 40791 ),
"PT26_UNIT" : ( 40791, 40792 ),
"PT27AgeRec" : ( 40792, 40794 ),
"PT27Rec" : ( 40794, 40795 ),
"PT27a" : ( 40795, 40796 ),
"PT27FromAge1" : ( 40796, 40798 ),
"PT27ToAge1" : ( 40798, 40800 ),
"PT27FromAge2" : ( 40800, 40802 ),
"PT27ToAge2" : ( 40802, 40804 ),
"PT27b" : ( 40804, 40805 ),
"PT28" : ( 40805, 40806 ),
"PT28a" : ( 40806, 40807 ),
"AlcEver" : ( 40807, 40808 ),
"AnyDrug" : ( 40808, 40809 ),
"AnyDrugEver" : ( 40809, 40810 ),
"UseDrugFill" : ( 40810, 41065 ),
"SubstanceString" : ( 41065, 41320 ),
"SubstanceCount" : ( 41320, 41321 ),
"ALCOHOL" : ( 41321, 41322 ),
"MARIJUANA" : ( 41322, 41323 ),
"COCAINE" : ( 41323, 41324 ),
"STIMULANTS" : ( 41324, 41325 ),
"SEDATIVES" : ( 41325, 41326 ),
"OPIATES" : ( 41326, 41327 ),
"OTHER15" : ( 41327, 41328 ),
"OC1" : ( 41328, 41329 ),
"OC1a" : ( 41329, 41330 ),
"OC1b" : ( 41330, 41331 ),
"OC1c" : ( 41331, 41332 ),
"OCp1" : ( 41332, 41333 ),
"OCp1a1" : ( 41333, 41334 ),
"OCp1a2" : ( 41334, 41335 ),
"OCp1a3" : ( 41335, 41336 ),
"OCp1b" : ( 41336, 41337 ),
"OCp1c" : ( 41337, 41338 ),
"OCp1d" : ( 41338, 41339 ),
"OCp1f" : ( 41339, 41340 ),
"OCp1g" : ( 41340, 41341 ),
"OCp1h" : ( 41341, 41342 ),
"OCp1i" : ( 41342, 41343 ),
"OCp1j" : ( 41343, 41344 ),
"OC1Probe" : ( 41344, 41345 ),
"OC2" : ( 41345, 41346 ),
"OC3" : ( 41346, 41347 ),
"OC4" : ( 41347, 41348 ),
"OC5" : ( 41348, 41349 ),
"OC6" : ( 41349, 41350 ),
"OC7" : ( 41350, 41351 ),
"OC7A" : ( 41351, 41352 ),
"OC_AO8" : ( 41352, 41354 ),
"OC_O8" : ( 41354, 41355 ),
"OC_AR8" : ( 41355, 41357 ),
"OC_R8" : ( 41357, 41358 ),
"OC8_1" : ( 41358, 41360 ),
"OC9" : ( 41360, 41361 ),
"OC9Specify" : ( 41361, 41586 ),
"OC9A" : ( 41586, 41587 ),
"OCp9" : ( 41587, 41588 ),
"OCp9a1" : ( 41588, 41589 ),
"OCp9a2" : ( 41589, 41590 ),
"OCp9a3" : ( 41590, 41591 ),
"OCp9b" : ( 41591, 41592 ),
"OCp9c" : ( 41592, 41593 ),
"OCp9d" : ( 41593, 41594 ),
"OCp9f" : ( 41594, 41595 ),
"OCp9g" : ( 41595, 41596 ),
"OCp9h" : ( 41596, 41597 ),
"OCp9i" : ( 41597, 41598 ),
"OCp9j" : ( 41598, 41599 ),
"OC9Probe" : ( 41599, 41600 ),
"OC10" : ( 41600, 41601 ),
"OC11" : ( 41601, 41602 ),
"OC13" : ( 41602, 41603 ),
"OC14" : ( 41603, 41604 ),
"OC15" : ( 41604, 41605 ),
"OC15A" : ( 41605, 41606 ),
"OC_ao16" : ( 41606, 41608 ),
"OC_o16" : ( 41608, 41609 ),
"OC_ar16" : ( 41609, 41611 ),
"OC_r16" : ( 41611, 41612 ),
"OC16_1" : ( 41612, 41614 ),
"OC17" : ( 41614, 41615 ),
"OC17a" : ( 41615, 41616 ),
"PN1x" : ( 41616, 41617 ),
"PNp1" : ( 41617, 41618 ),
"PNp1a1" : ( 41618, 41619 ),
"PNp1a2" : ( 41619, 41620 ),
"PNp1a3" : ( 41620, 41621 ),
"PNp1b" : ( 41621, 41622 ),
"PNp1c" : ( 41622, 41623 ),
"PNp1d" : ( 41623, 41624 ),
"PNp1f" : ( 41624, 41625 ),
"PNp1g" : ( 41625, 41626 ),
"PNp1h" : ( 41626, 41627 ),
"PNp1i" : ( 41627, 41628 ),
"PNp1j" : ( 41628, 41629 ),
"PN1Probe" : ( 41629, 41630 ),
"PN2a" : ( 41630, 41631 ),
"PN2b" : ( 41631, 41632 ),
"PN3" : ( 41632, 41633 ),
"PN3a" : ( 41633, 41634 ),
"PN3a1" : ( 41634, 41635 ),
"PN3b" : ( 41635, 41636 ),
"PN3b1" : ( 41636, 41637 ),
"PN3c" : ( 41637, 41638 ),
"PN3c1" : ( 41638, 41639 ),
"PN4_1" : ( 41639, 41640 ),
"PN4_2" : ( 41640, 41641 ),
"PN4_3" : ( 41641, 41642 ),
"PN4_4" : ( 41642, 41643 ),
"PN4_5" : ( 41643, 41644 ),
"PN4_6" : ( 41644, 41645 ),
"PN4_7" : ( 41645, 41646 ),
"PN4_8" : ( 41646, 41647 ),
"PN4_9" : ( 41647, 41648 ),
"PN4_10" : ( 41648, 41649 ),
"PN4_11" : ( 41649, 41650 ),
"PN4_12" : ( 41650, 41651 ),
"PN4_13" : ( 41651, 41652 ),
"PN5" : ( 41652, 41655 ),
"PN5_1" : ( 41655, 41656 ),
"PN6" : ( 41656, 41657 ),
"PN7b" : ( 41657, 41658 ),
"PN7c" : ( 41658, 41659 ),
"PN7d" : ( 41659, 41660 ),
"PN_ao8" : ( 41660, 41662 ),
"PN_o8" : ( 41662, 41663 ),
"PN_ar8" : ( 41663, 41665 ),
"PN_r8" : ( 41665, 41666 ),
"PN_ao8DK" : ( 41666, 41668 ),
"PN8a" : ( 41668, 41669 ),
"PN9" : ( 41669, 41670 ),
"PN10" : ( 41670, 41671 ),
"PN11_1" : ( 41671, 41672 ),
"PN11_2" : ( 41672, 41673 ),
"PN11Specify1" : ( 41673, 41753 ),
"PN11Cd1" : ( 41753, 41756 ),
"PN11Another1" : ( 41756, 41757 ),
"PN11Specify2" : ( 41757, 41837 ),
"PN11Cd2" : ( 41837, 41840 ),
"PN11Another2" : ( 41840, 41841 ),
"PN11Specify3" : ( 41841, 41921 ),
"PN11Cd3" : ( 41921, 41924 ),
"PN11a" : ( 41924, 41925 ),
"PN12" : ( 41925, 41926 ),
"PN12a" : ( 41926, 41927 ),
"AskSite" : ( 41927, 41928 ),
"AskRelationship" : ( 41928, 41929 ),
"mWHO" : ( 41929, 42184 ),
"fWHO" : ( 42184, 42439 ),
"HE1" : ( 42439, 42441 ),
"HE1BoxM" : ( 42441, 42442 ),
"HE1BoxF" : ( 42442, 42443 ),
"HE2" : ( 42443, 42445 ),
"HE4Specify" : ( 42445, 42525 ),
"HE4Code" : ( 42525, 42527 ),
"HE4b" : ( 42527, 42528 ),
"HE6" : ( 42528, 42529 ),
"HEm13" : ( 42529, 42530 ),
"HEf13" : ( 42530, 42531 ),
"HE14_1" : ( 42531, 42532 ),
"HE14_2" : ( 42532, 42533 ),
"HE14_3" : ( 42533, 42534 ),
"HE14_4" : ( 42534, 42535 ),
"HE14_5" : ( 42535, 42536 ),
"HE14_6" : ( 42536, 42537 ),
"HE14_7" : ( 42537, 42538 ),
"HE15_1" : ( 42538, 42539 ),
"HE15_2" : ( 42539, 42540 ),
"HE15_3" : ( 42540, 42541 ),
"HE15_4" : ( 42541, 42542 ),
"HE15_5" : ( 42542, 42543 ),
"HE15_6" : ( 42543, 42544 ),
"HE15_7" : ( 42544, 42545 ),
"HEm17a1" : ( 42545, 42546 ),
"HEm17a2" : ( 42546, 42547 ),
"HEm17a3" : ( 42547, 42548 ),
"HEm17a4" : ( 42548, 42549 ),
"HEm17a5" : ( 42549, 42550 ),
"HEm17a6" : ( 42550, 42551 ),
"HEf17a1" : ( 42551, 42552 ),
"HEf17a2" : ( 42552, 42553 ),
"HEf17a3" : ( 42553, 42554 ),
"HEf17a4" : ( 42554, 42555 ),
"HEf17a5" : ( 42555, 42556 ),
"HEf17a6" : ( 42556, 42557 ),
"HEm17b" : ( 42557, 42558 ),
"HEf17b" : ( 42558, 42559 ),
"HEm18a" : ( 42559, 42560 ),
"HEf18a" : ( 42560, 42561 ),
"HEm18b" : ( 42561, 42562 ),
"HEf18b" : ( 42562, 42563 ),
"HEm19" : ( 42563, 42564 ),
"HEf19" : ( 42564, 42565 ),
"HEm20a" : ( 42565, 42566 ),
"HEf20a" : ( 42566, 42567 ),
"HEm20b" : ( 42567, 42568 ),
"HEf20b" : ( 42568, 42569 ),
"HE24" : ( 42569, 42570 ),
"HE25" : ( 42570, 42571 ),
"HE26" : ( 42571, 42572 ),
"HE27" : ( 42572, 42573 ),
"HE27a" : ( 42573, 42574 ),
"HE28" : ( 42574, 42575 ),
"HEm29a" : ( 42575, 42576 ),
"HEf29a" : ( 42576, 42577 ),
"HEm29b" : ( 42577, 42578 ),
"HEf29b" : ( 42578, 42579 ),
"HEm30" : ( 42579, 42580 ),
"HEf30" : ( 42580, 42581 ),
"HEm31" : ( 42581, 42582 ),
"HEf31" : ( 42582, 42583 ),
"HEm33" : ( 42583, 42584 ),
"HEf33" : ( 42584, 42585 ),
"HEm34" : ( 42585, 42586 ),
"HEf34" : ( 42586, 42587 ),
"HEm36" : ( 42587, 42588 ),
"HEf36" : ( 42588, 42589 ),
"HE37a" : ( 42589, 42590 ),
"HE37b" : ( 42590, 42591 ),
"HE37c" : ( 42591, 42592 ),
"HEm38" : ( 42592, 42593 ),
"HEf38" : ( 42593, 42594 ),
"HEm39" : ( 42594, 42595 ),
"HEf39" : ( 42595, 42596 ),
"HE40" : ( 42596, 42597 ),
"HE40a" : ( 42597, 42598 ),
"HE40b" : ( 42598, 42599 ),
"HE41a" : ( 42599, 42600 ),
"HE41b" : ( 42600, 42601 ),
"HE42a" : ( 42601, 42602 ),
"HE42b" : ( 42602, 42603 ),
"HE43a" : ( 42603, 42604 ),
"HE43b" : ( 42604, 42605 ),
"HE43c" : ( 42605, 42606 ),
"HE43d" : ( 42606, 42607 ),
"HE44" : ( 42607, 42608 ),
"HE44a" : ( 42608, 42609 ),
"HE44b" : ( 42609, 42610 ),
"HE44c" : ( 42610, 42611 ),
"HE44d" : ( 42611, 42612 ),
"HE45a" : ( 42612, 42613 ),
"HE45b" : ( 42613, 42614 ),
"HE45c" : ( 42614, 42615 ),
"HE45d" : ( 42615, 42616 ),
"HE46" : ( 42616, 42617 ),
"HE46a" : ( 42617, 42618 ),
"HE46b" : ( 42618, 42619 ),
"HE47a" : ( 42619, 42620 ),
"HE47b" : ( 42620, 42621 ),
"HE47c" : ( 42621, 42622 ),
"HE47d" : ( 42622, 42623 ),
"HE48a" : ( 42623, 42624 ),
"HE48b" : ( 42624, 42625 ),
"HE48c" : ( 42625, 42626 ),
"HE48d" : ( 42626, 42627 ),
"HE49_1" : ( 42627, 42628 ),
"HE49_1a" : ( 42628, 42629 ),
"HE49_1b" : ( 42629, 42631 ),
"HE49_2" : ( 42631, 42632 ),
"HE49_2a" : ( 42632, 42633 ),
"HE49_2b" : ( 42633, 42635 ),
"HE49_3" : ( 42635, 42636 ),
"HE49_3a" : ( 42636, 42637 ),
"HE49_3b" : ( 42637, 42639 ),
"HE51" : ( 42639, 42640 ),
"HE51a" : ( 42640, 42642 ),
"FinalQuestionYN" : ( 42642, 42643 ),
"CHANGE_ID" : ( 42643, 42644 ),
"REAL_ID" : ( 42644, 42655 ),
"IntType" : ( 42655, 42656 ),
"QS" : ( 42656, 42657 )
}
#
# Get named field from a raw, binary "asc" SSAGA record
#
def get_field(record, name):
    """Return the bytes of the named field from a raw fixed-width SSAGA record.

    ``field_offsets`` (module-level) maps each field name to its
    half-open (start, end) byte range within the record.
    """
    start, end = field_offsets[name]
    return record[start:end]
| StarcoderdataPython |
3307267 | """The Page database model."""
from __future__ import annotations
import uuid
from datetime import datetime
from typing import Any, Dict, List, Optional
from sqlalchemy import JSON, Column, DateTime, Integer, Unicode, UnicodeText
from .base import Base
__all__ = ["SqlPage"]
class SqlPage(Base):
    """The SQLAlchemy model for a Page, which is a parameterized notebook that
    is available as a website.
    Notes
    -----
    Pages can be backed by GitHub or can be added directly through the API.
    GitHub-backed pages use extra columns to describe their GitHub context
    and version.
    .. todo::
        API-added notebooks use additional columns to describe the user that
        uploaded the content.
    """
    __tablename__ = "pages"
    name: str = Column(
        Unicode(32), primary_key=True, default=lambda: uuid.uuid4().hex
    )
    """The primary key, and also the ID for the Page REST API."""
    ipynb: str = Column(UnicodeText, nullable=False)
    """The Jinja-parameterized notebook, as a JSON-formatted string."""
    parameters: Dict[str, Any] = Column(JSON, nullable=False)
    """Parameters and their jsonschema descriptors."""
    title: str = Column(UnicodeText, nullable=False)
    """Display title of the notebook."""
    date_added: datetime = Column(DateTime, nullable=False)
    """Date when the page is registered through the Times Square API."""
    authors: List[Dict[str, Any]] = Column(JSON, nullable=False)
    """Authors of the notebook.
    The schema for this column is described by the NotebookSidecarFile
    authors field schema.
    """
    tags: List[str] = Column(JSON, nullable=False)
    """Tags (keywords) assigned to this page."""
    uploader_username: Optional[str] = Column(Unicode(64), nullable=True)
    """Username of the uploader, if this page is uploaded without GitHub
    backing.
    """
    date_deleted: Optional[datetime] = Column(DateTime)
    """A nullable datetime that is set to the datetime when the page is
    soft-deleted.
    """
    description: Optional[str] = Column(UnicodeText)
    """Description of a page (markdown-formatted)."""
    cache_ttl: Optional[int] = Column(Integer)
    """The cache TTL (seconds) for HTML renders, or None to retain renders
    indefinitely.
    """
    github_owner: Optional[str] = Column(Unicode(255))
    """The GitHub repository owner (username or organization name) for
    GitHub-backed pages.
    """
    github_repo: Optional[str] = Column(Unicode(255))
    """The GitHub repository name for GitHub-backed pages."""
    repository_path_prefix: Optional[str] = Column(Unicode(2048))
    """The repository path prefix, relative to the root of the directory."""
    repository_display_path_prefix: Optional[str] = Column(Unicode(2048))
    """The repository path prefix, relative to the configured root of Times
    Square notebooks in a repository.
    """
    repository_path_stem: Optional[str] = Column(Unicode(255))
    """The filename stem (without prefix and without extension) of the
    source file in the GitHub repository for GitHub-backed pages.
    The repository_source_extension and
    repository_sidecar_extension columns provide the extensions for
    the corresponding files.
    """
    repository_source_extension: Optional[str] = Column(Unicode(255))
    """The filename extension of the source file in the GitHub
    repository for GitHub-backed pages.
    Combine with repository_path_stem to get the file path.
    """
    repository_sidecar_extension: Optional[str] = Column(Unicode(255))
    """The filename extension of the sidecar YAML file in the GitHub
    repository for GitHub-backed pages.
    Combine with repository_path_stem to get the file path.
    """
    repository_source_sha: Optional[str] = Column(Unicode(40))
    """The git tree sha of the source file for GitHub-backed pages."""
    repository_sidecar_sha: Optional[str] = Column(Unicode(40))
    """The git tree sha of the sidecar YAML file for GitHub-backed pages."""
| StarcoderdataPython |
148054 | from OpenGL import GL
import glm
import importlib
import Helpers
importlib.reload(Helpers)
from Helpers import compile_shader, link_program, create_program_attribute_layout, create_vertex_array_and_draw_call,\
flatten_vertex_data, validate_vertex_data, validate_attribute_bindings
def create_cube(s):
    """Build cube geometry with half-extent *s* as two vertex attributes.

    Returns {0: positions, 1: colors}: 36 homogeneous positions (x, y, z, 1.0)
    forming two triangles per face, plus one RGB tuple per vertex
    (X faces red, Y faces green, Z faces blue).
    """
    # Per face: the vertex color and the six corner sign patterns, kept in
    # the original face order (Y-, X+, Y+, X-, Z-, Z+) and winding.
    faces = [
        ((0.0, 1.0, 0.0), [(-1, -1, -1), (1, -1, -1), (1, -1, 1),
                           (-1, -1, -1), (1, -1, 1), (-1, -1, 1)]),   # Y-
        ((1.0, 0.0, 0.0), [(1, -1, -1), (1, 1, -1), (1, 1, 1),
                           (1, -1, -1), (1, 1, 1), (1, -1, 1)]),      # X+
        ((0.0, 1.0, 0.0), [(1, 1, -1), (-1, 1, -1), (-1, 1, 1),
                           (1, 1, -1), (-1, 1, 1), (1, 1, 1)]),       # Y+
        ((1.0, 0.0, 0.0), [(-1, 1, -1), (-1, -1, -1), (-1, -1, 1),
                           (-1, 1, -1), (-1, -1, 1), (-1, 1, 1)]),    # X-
        ((0.0, 0.0, 1.0), [(1, -1, -1), (-1, 1, -1), (1, 1, -1),
                           (1, -1, -1), (-1, -1, -1), (-1, 1, -1)]),  # Z-
        ((0.0, 0.0, 1.0), [(-1, -1, 1), (1, -1, 1), (1, 1, 1),
                           (-1, -1, 1), (1, 1, 1), (-1, 1, 1)]),      # Z+
    ]
    positions = []
    colors = []
    for color, corners in faces:
        for sx, sy, sz in corners:
            positions.append((sx * s, sy * s, sz * s, 1.0))
            colors.append(color)
    return {0: positions, 1: colors}
class Resources(object):
    """Owns the GL objects (shader program, cube VAO and draw call) for the demo.

    Passing an existing Resources instance copies its handles; this supports
    hot-reloading the module (importlib.reload above) without recreating GL state.
    """
    def __init__(self, resources=None):
        if resources is None:
            # Fresh instance: null GL handles until initialize() is called.
            self.cube_vertex_array = 0
            self.cube_draw_call = None
            self.program = 0
        else:
            # Adopt the handles of a previous instance (reload path).
            self.cube_vertex_array = resources.cube_vertex_array
            self.cube_draw_call = resources.cube_draw_call
            self.program = resources.program
    def initialize(self, _frame_size):
        """Compile/link the shader program and upload the cube geometry."""
        # Program
        with open("Dummy.vert") as vertex_shader_file:
            vertex_shader = compile_shader(vertex_shader_file.readlines(), GL.GL_VERTEX_SHADER)
        with open("Dummy.frag") as pixel_shader_file:
            pixel_shader = compile_shader(pixel_shader_file.readlines(), GL.GL_FRAGMENT_SHADER)
        program = link_program(vertex_shader, pixel_shader)
        self.program = program
        # Cube
        flat_vertex_data = flatten_vertex_data(validate_vertex_data(create_cube(0.5)))
        cube_vertex_array, cube_draw_call = create_vertex_array_and_draw_call(*flat_vertex_data)
        self.cube_vertex_array = cube_vertex_array
        self.cube_draw_call = cube_draw_call
        # Validate both
        validate_attribute_bindings(*create_program_attribute_layout(program), *flat_vertex_data[:2])
    def dispose(self):
        """Delete the GL objects and reset handles to their null values."""
        GL.glDeleteVertexArrays(1, [self.cube_vertex_array])
        self.cube_vertex_array = 0
        self.cube_draw_call = None
        GL.glDeleteProgram(self.program)
        self.program = 0
def render(resources, frame_size, elapsed_time):
    """Clear the frame and draw the spinning cube.

    elapsed_time (seconds) drives the rotation about the world x axis.
    """
    GL.glClearColor(0.2, 0.2, 0.2, 0.0)
    GL.glClearDepth(1.0)
    GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
    GL.glEnable(GL.GL_DEPTH_TEST)
    GL.glEnable(GL.GL_CULL_FACE)
    # Perspective camera at (3,3,3) looking at the origin with z up; the cube
    # rotates about the x axis over time.
    model_view_projection = (
        glm.perspective(45.0, frame_size[0] / frame_size[1], 0.1, 10000.0) *
        glm.lookAt(glm.vec3(3.0, 3.0, 3.0), glm.vec3(0.0, 0.0, 0.0), glm.vec3(0.0, 0.0, 1.0)) *
        glm.rotate(glm.mat4(1.0), elapsed_time, glm.vec3(1.0, 0.0, 0.0))
    )
    GL.glUseProgram(resources.program)
    # NOTE(review): uniform location 0 is presumably the MVP matrix declared
    # with a layout qualifier in Dummy.vert — confirm against the shader.
    GL.glUniformMatrix4fv(0, 1, False, glm.value_ptr(model_view_projection))
    resources.cube_draw_call()
    GL.glUseProgram(0)
    GL.glDisable(GL.GL_DEPTH_TEST)
    GL.glDisable(GL.GL_CULL_FACE)
def dispose_render():
    """Unbind/disable the GL state used by render() (teardown/reload hook)."""
    GL.glBindVertexArray(0)
    GL.glUseProgram(0)
    GL.glDisable(GL.GL_DEPTH_TEST)
    GL.glDisable(GL.GL_CULL_FACE)
1820975 | #!/usr/bin/env python3
help =\
'''
Plot trajectory, or frames if solution has two spatial dimensions, generated by
running a PETSc TS program. Reads output from
-ts_monitor binary:TDATA -ts_monitor_solution binary:UDATA
Requires copies or sym-links to $PETSC_DIR/lib/petsc/bin/PetscBinaryIO.py and
$PETSC_DIR/lib/petsc/bin/petsc_conf.py.
'''
import PetscBinaryIO
from sys import exit, stdout
from time import sleep
from argparse import ArgumentParser, RawTextHelpFormatter
import numpy as np
import matplotlib.pyplot as plt
# Build the CLI: TDATA/UDATA are required binary files written by PETSc TS
# monitors; a positive -mx switches from trajectory mode to frame/movie mode.
parser = ArgumentParser(description=help,
                        formatter_class=RawTextHelpFormatter)
parser.add_argument('tfile',metavar='TDATA',
                    help='from -ts_monitor binary:TDATA')
parser.add_argument('ufile',metavar='UDATA',
                    help='from -ts_monitor_solution binary:UDATA')
parser.add_argument('-mx',metavar='MX', type=int, default=-1,
                    help='spatial grid with MX points in x direction')
parser.add_argument('-my',metavar='MY', type=int, default=-1,
                    help='spatial grid with MY points in y direction; =MX by default')
parser.add_argument('-dof',metavar='DOF', type=int, default=1,
                    help='degrees of freedom of solution; =1 by default')
parser.add_argument('-c',metavar='C', type=int, default=0,
                    help='component; =0,1,..,dof-1; ignored if dof=1)')
parser.add_argument('-o',metavar='FILE',dest='filename',
                    help='image file FILE (trajectory case)')
parser.add_argument('-oroot',metavar='ROOT',dest='rootname',
                    help='frame files ROOT000.png,ROOT001.png,... (movie case)')
args = parser.parse_args()
# -my defaults to -mx when only -mx is given.
if args.mx > 0 and args.my < 1:
    args.my = args.mx
frames = (args.mx > 0)
# Read time steps and solution; after transpose, each U column is one state.
io = PetscBinaryIO.PetscBinaryIO()
t = np.array(io.readBinaryFile(args.tfile)).flatten()
U = np.array(io.readBinaryFile(args.ufile)).transpose()
dims = np.shape(U)
if len(t) != dims[1]:
    print('time dimension mismatch: %d != %d' % (len(t),dims[1]))
    exit(1)
if frames:
    # Frame mode: reshape flat states onto the (my, mx[, dof]) grid.
    if args.dof == 1:
        if dims[0] != args.mx * args.my:
            print('spatial dimension mismatch: %d != %d * %d (and dof=1)' % \
                  (dims[0],args.mx,args.my))
            exit(2)
        U = np.reshape(U,(args.my,args.mx,len(t)))
        dims = np.shape(U)
        print('solution U is shape=(%d,%d,%d)' % tuple(dims))
    else:
        if dims[0] != args.mx * args.my * args.dof:
            print('spatial dimension mismatch: %d != %d * %d * %d' % \
                  (dims[0],args.mx,args.my,args.dof))
            exit(3)
        U = np.reshape(U,(args.my,args.mx,args.dof,len(t)))
        dims = np.shape(U)
        print('solution U is shape=(%d,%d,%d,%d)' % tuple(dims))
    print('time t has length=%d, with mx x my = %d x %d frames' % \
          (dims[-1],dims[1],dims[0]))
else:
    print('time t has length=%d, solution Y is shape=(%d,%d)' % \
          (len(t),dims[0],dims[1]))
framescmap = 'jet' # close to the PETSc X windows default
#framescmap = 'inferno'
#framescmap = 'gray'
if frames:
    # Movie case: either save numbered PNG frames (-oroot) or animate on screen.
    print('generating files %s000.png .. %s%03d.png:' % \
          (args.rootname,args.rootname,len(t)-1))
    if args.dof == 1:
        plt.imshow(U[:,:,0],cmap=framescmap)
    else:
        plt.imshow(U[:,:,args.c,0],cmap=framescmap)
    plt.title('t = %g' % t[0])
    if args.rootname:
        plt.savefig(args.rootname + "%03d.png" % 0)
    else:
        plt.ion()
        plt.show()
    for k in range(len(t)-1):
        # Progress dots, one per frame.
        print('.', end =' ')
        stdout.flush()
        if args.dof == 1:
            plt.imshow(U[:,:,k+1],cmap=framescmap)
        else:
            plt.imshow(U[:,:,args.c,k+1],cmap=framescmap)
        plt.title('t = %g' % t[k+1])
        if args.rootname:
            plt.savefig(args.rootname + "%03d.png" % (k+1))
        else:
            plt.pause(0.1)
    print('.')
else:
    # Trajectory case: one curve per component (or just component -c if dof>1).
    if args.dof > 1:
        print('plotting only component %d ...' % args.c)
        plt.plot(t,U[args.c],label='y[%d]' % args.c)
    else:
        for k in range(dims[0]):
            plt.plot(t,U[k],label='y[%d]' % k)
    plt.xlabel('t')
    plt.legend()
    if args.filename:
        print('writing file %s' % args.filename)
        plt.savefig(args.filename)
    else:
        plt.show()
5030368 | from datetime import date
from typing import Any
from typing import TypeVar
from typing import Union
from hw.alexander_sidorov.common import Errors
from hw.alexander_sidorov.common import api
from hw.alexander_sidorov.common import typecheck
T1 = TypeVar("T1")
@api
@typecheck
def task_04(birthdays: dict[T1, date]) -> Union[T1, Errors]:
    """
    Returns the ID of the oldest person.
    """
    validate(birthdays)
    # The oldest person is the one with the smallest (earliest) birth date.
    oldest_id = min(birthdays, key=birthdays.get)
    return oldest_id
def validate(birthdays: Any) -> None:
    """Reject an empty birthdays mapping.

    Raises AssertionError (same type callers already expect) explicitly
    instead of via `assert`, so the check survives `python -O`.
    """
    if not birthdays:
        raise AssertionError("empty birthdays")
12864215 | # Copyright 2020 <NAME> (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
# <NAME>, March 2020
# Robot will continuously intercept around current position.
#
# For description and usage hints, execute with '-h'
import sys, os
import time
import logging, signal
logging.basicConfig(level=logging.INFO)
import math, random
import argparse
import falconspy
import rtdb2tools
from robotLibrary import RobotLibrary
from worldState import WorldState
from FalconsCoordinates import *
def parse_arguments():
    """Parse the command line for the intercept test; returns an argparse Namespace."""
    parser = argparse.ArgumentParser(description="""Automated single-robot intercept test. Robot will choose a position in a circle, continuously attempting to intercept the ball and pass to next robot. Includes a fallback getball in case ball bounces off. See also: wrapper script interceptCircle.py.""")
    parser.add_argument('-a', '--actionradius', help='zone/action radius: in case intercept fails and ball is within this radius, just do a getball fallback', type=float, default=2.0)
    parser.add_argument('-c', '--circleradius', help='home position circle radius on which robot default positions are set', type=float, default=4.0)
    parser.add_argument('-t', '--target', help='pass target (default: next robot)', type=float, nargs=2, default=None)
    parser.add_argument('-n', '--targetnoise', help='aim given amount of meters at a random side next to the target', type=float, default=0.0)
    parser.add_argument('-w', '--dontwait', help='do not wait with intercepting until previous robot has the ball', action='store_true')
    parser.add_argument('-q', '--quiet', help='suppress output', action='store_true')
    # TODO use option 'active' intercept?
    parser.add_argument('--home', help='home position (x,y), default calculated based on available robots and circleradius', type=float, nargs=2, default=None)
    parser.add_argument('-i', '--index', help='home position index to choose (starting count at 1), default calculate based on available robots', type=int, nargs=2, default=None)
    # NOTE: guessAgentId() is evaluated here (at parse time), not lazily.
    parser.add_argument('-r', '--robot', help='robot ID to use (intended only for simulation)', type=int, default=rtdb2tools.guessAgentId())
    parser.add_argument('--ignore', help='robots to be ignored', type=int, nargs='+', default=[1])
    return parser.parse_args()
def calcCirclePos(robotIdx, numRobots, radius=3, center=(0,0)):
    """Place robot *robotIdx* of *numRobots* evenly on a circle.

    Returns (x, y, phi) where phi points from the position toward the circle
    center (the position angle minus pi).
    """
    angle = robotIdx * (2 * math.pi / numRobots)
    return (center[0] + radius * math.cos(angle),
            center[1] + radius * math.sin(angle),
            angle - math.pi)
class Interceptor():
    """Single-robot behaviour: hold a circle position, intercept the ball,
    and pass it on to the next active robot.

    Lifecycle: construct with parsed settings, then run(); SIGINT triggers a
    clean shutdown via signalHandler().
    """
    def __init__(self, settings):
        self.settings = settings
        self.rl = RobotLibrary(settings.robot, joystick=False)
        self.ws = WorldState(settings.robot)
        self.ws.startMonitoring()
        # Tracks ball possession of the previous robot between polls
        # (canStartIntercept fires on the has-ball -> lost-ball transition).
        self.otherRobotHasBall = False
        # setup logging
        self.state = None
        self.logger = self.initializeLogger()
        if settings.quiet:
            self.logger.setLevel(logging.NOTSET)
        # setup signal handler for proper shutdown
        self.done = False
        signal.signal(signal.SIGINT, self.signalHandler)
    def signalHandler(self, signal, frame):
        """SIGINT handler: stop the run loop and shut down robot interfaces."""
        self.done = True
        self.ws.stopMonitoring()
        self.rl.shutdown()
    # TODO: this is not yet working as intended...
    def initializeLogger(self):
        """
        Setup the logging environment
        """
        log = logging.getLogger() # root logger
        log.setLevel(logging.INFO)
        format_str = '%(asctime)s.%(msecs)03d - %(levelname)-8s - r' + str(self.settings.robot) + ' - %(message)s'
        date_format = '%Y-%m-%dT%H:%M:%S'
        formatter = logging.Formatter(format_str, date_format)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        log.handlers = [] # clear
        log.addHandler(stream_handler)
        return logging.getLogger(__name__)
    def activeRobots(self):
        """Return active robot IDs, minus the configured ignore list."""
        # ignore r1, if it is present, because it can never contribute
        return [r for r in self.ws.activeRobots() if not r in self.settings.ignore]
    def calculateRobotIndex(self):
        """Return (idx0, n): this robot's zero-based index among n active robots."""
        # optional overrule
        if self.settings.index != None:
            idx0 = self.settings.index[0] - 1
            n = self.settings.index[1]
        else:
            # default: get active robots and figure out index of this robot
            a = self.activeRobots()
            while not self.settings.robot in a: # init robustness
                time.sleep(0.1)
                a = self.activeRobots()
            n = len(a)
            idx0 = a.index(self.settings.robot)
        return (idx0, n)
    def calculateHomePosition(self):
        """Return the (x, y, rz) home pose, turned to face the ball when seen."""
        # optional overrule
        if self.settings.home != None:
            (x, y) = self.settings.home
            rz = math.pi * 0.5
        else:
            # default: position on a circle
            (idx0, n) = self.calculateRobotIndex()
            (x, y, rz) = calcCirclePos(idx0, n, self.settings.circleradius)
        # face the ball if possible
        b = self.ws.getBallPosition()
        if b:
            rz = math.atan2(b.y - y, b.x - x)
        return (x, y, rz)
    def canStartIntercept(self):
        """True when this robot should begin its intercept/getBall attempt."""
        # optional overrule
        if self.settings.dontwait:
            return True
        # robot should never stand idle if ball is closeby
        if self.ballCloseBy():
            return True
        # check if previous robot has the ball
        (idx0, n) = self.calculateRobotIndex()
        a = self.activeRobots()
        otherIdx = a[(idx0-1) % n]
        # wait for the pass (state change in ball possession)
        # robot should not intercept when other robot is still turning for instance
        otherRobotHadBall = self.otherRobotHasBall
        self.otherRobotHasBall = self.ws.hasBall(otherIdx)
        return self.otherRobotHasBall == False and otherRobotHadBall == True
    def determineTarget(self, noise=None):
        """Return the (x, y) pass target; *noise* shifts it sideways (RCS x)."""
        # optional overrule
        if self.settings.target:
            (x, y) = self.settings.target
            rz = 0
        else:
            # calculate nominal position of next robot
            (idx0, n) = self.calculateRobotIndex()
            a = self.activeRobots()
            otherIdx = a[(idx0+1) % n]
            (x, y, rz) = calcCirclePos(idx0+1, n, self.settings.circleradius)
        otherPos = RobotPose(x, y, rz)
        # add noise?
        if noise:
            # add noise to RCS x (perpendicular)
            ownPos = self.ws.getRobotPosition()
            ownPos.Rz = math.atan2(y - ownPos.y, x - ownPos.x) # face target
            otherPosRcs = otherPos.transform_fcs2rcs(ownPos)
            # offset RCS x in a random direction
            r = random.randint(0, 1)
            otherPosRcs.x += (r * 2 - 1) * noise
            # back to FCS
            otherPos = otherPosRcs.transform_rcs2fcs(ownPos)
        return (otherPos.x, otherPos.y) # ignore Rz
    def canPass(self):
        """True when the receiving robot is close enough to its nominal spot."""
        # compare current position of next robot with nominal
        nominalTarget = self.determineTarget()
        (idx0, n) = self.calculateRobotIndex()
        a = self.activeRobots()
        if len(a) == 1:
            return True
        otherIdx = a[(idx0+1) % n]
        otherPos = self.ws.getRobotPosition(otherIdx)
        delta = otherPos - RobotPose(*nominalTarget)
        return delta.xy().size() < 0.3
    def ballCloseBy(self):
        """True when the ball is seen within the configured action radius."""
        bd = self.ws.ballDistance()
        return bd != None and bd < self.settings.actionradius
    def setState(self, state):
        """Record a state transition in the RDL eventlog and on stdout (deduplicated)."""
        # only write state change
        if self.state != state:
            # write to RDL eventlog
            os.system('export TURTLE5K_ROBOTNUMBER=' + str(self.settings.robot) + ' ; frun diagnostics sendEvent INFO "' + state + '" > /dev/null')
            # write to stdout?
            logging.info(state)
            self.state = state
    def run(self):
        """Main loop: reposition, intercept (with getBall fallback), pass, repeat."""
        # iterate
        while not self.done:
            # move to starting position, facing ball, with coarse tolerances
            homePos = self.calculateHomePosition()
            self.setState('repositioning / waiting')
            self.rl.move(*homePos, xyTol=0.1, rzTol=0.05)
            # wait until robot can start his intercept/getBall attempt
            if self.canStartIntercept():
                # get the ball, preferably via intercept
                while not self.ws.hasBall() and not self.done:
                    if self.ballCloseBy():
                        self.setState('getball fallback')
                        self.rl.getBall() # blocking
                    else:
                        self.setState('intercepting')
                        self.rl.interceptBall() # blocking (with not-so-obvious RUNNING/FAILED criteria -> see mp code)
                        # note: good weather behavior: ball comes into the action radius while the robot
                        #       is continuously intercepting on it, until pass/fail, so the getBall
                        #       fallback should only start after intercept returns FAILED due to the ball moving away
                # other robot might still be repositioning
                while not self.canPass() and not self.done:
                    self.setState('waiting to pass')
                    time.sleep(0.1)
                # pass to next robot and sleep a while, to prevent directly chasing the ball
                self.setState('pass')
                self.rl.passTo(*self.determineTarget(self.settings.targetnoise))
                time.sleep(0.5)
            else:
                # sleep a bit
                time.sleep(0.1)
            # check if robot went offline
            self.done = self.settings.robot not in self.activeRobots()
def main(args):
    """Run the intercept behaviour with the parsed command-line settings."""
    Interceptor(args).run()
if __name__ == '__main__':
    args = parse_arguments()
    # Refuse to run without a valid robot ID (guessAgentId() fails off-robot).
    if args.robot == 0 or args.robot == None:
        raise RuntimeError("Error: could not determine robot ID, this script should run on a robot")
    main(args)
| StarcoderdataPython |
11363161 | <reponame>szypkiwonsz/Physiotherapy-Management-System
import calendar
import datetime
import pandas as pd
from applications.users.models import OfficeDay, UserOffice
from utils.add_zero import add_zero
def get_number_of_days_in_month(year, month):
    """
    A function that returns a list of the day numbers (1..N) in a month.
    """
    num_days = calendar.monthrange(year, month)[1]
    # datetime.date(year, month, day).day is simply `day`, so there is no
    # need to build date objects — enumerate the day numbers directly.
    return list(range(1, num_days + 1))
def get_hours_in_day(earliest_time, latest_time, office_id):
    """Return the 'H:MM' appointment slots between two hours for an office.

    Slot spacing comes from the office's configured appointment interval.
    """
    interval = UserOffice.objects.values_list(
        'appointment_time_interval', flat=True).filter(pk=office_id).first()
    return [
        '{}:{}'.format(hour, add_zero(minute))
        for hour in range(earliest_time, latest_time)
        for minute in range(0, 60, interval)
    ]
def get_dates_in_month(office_id, days_in_month, month, year):
    """The function returns all possible appointment dates for the office,
    one list of 'DD.MM.YYYY H:MM' strings per day."""
    dates_in_month = []
    for day in days_in_month:
        # The office's opening hours depend on the weekday of this date.
        weekday = datetime.datetime(int(year), int(month), int(day)).weekday()
        office_day = OfficeDay.objects.get(office=office_id, day=weekday)
        hours = get_hours_in_day(
            earliest_time=int(office_day.earliest_appointment_time.split(':')[0]),
            latest_time=int(office_day.latest_appointment_time.split(':')[0]),
            office_id=office_id
        )
        day_dates = [f'{add_zero(day)}.{add_zero(month)}.{year} {hour}' for hour in hours]
        dates_in_month.append(day_dates)
    return dates_in_month
def get_dates_taken(dates_taken, service):
    """Expand each appointment into the minute-resolution 'DD.MM.YYYY H:MM'
    strings it blocks, including the service's lead-in time before it."""
    blocked = []
    lead = datetime.timedelta(minutes=service.duration - 1)
    one_minute = datetime.timedelta(minutes=1)
    for appointment in dates_taken:
        span = pd.date_range(appointment.date - lead,
                             appointment.date_end - one_minute,
                             freq="1min")
        blocked.extend(stamp.strftime('%d.%m.%Y %H:%M') for stamp in span)
    return blocked
| StarcoderdataPython |
1894615 | from appium import webdriver
import time
# from load_xls import read_excel
from pykeyboard import PyKeyboard
import xlrd
import xlwt
# 启动appium
def android_driver():
    """Start an Appium session for the Migu Music app and return the driver."""
    # Desired Capabilities describing the target device and app under test.
    capabilities = {
        "platformName": "Android",
        "platformVersion": "7.1.2",
        "deviceName": "MI 9",
        "appPackage": "cmccwm.mobilemusic",
        "appActivity": ".ui.base.MainActivity"
    }
    # Connect to the local Appium server and create a new session.
    driver = webdriver.Remote('http://localhost:4723/wd/hub', capabilities)
    driver.implicitly_wait(5)
    return driver
# 切换环境,change_list = ['*#testrs#*', '*#prs#*', '*#rs#*', '*#devrs#*'],输入:0:测试环境;1:预生产环境;2:生产环境;3:开发环境
def chage_env(driver, num):
    """Switch the app's backend environment by typing a magic search string.

    num indexes change_list: 0 = test, 1 = pre-production, 2 = production,
    3 = development. The app is restarted so the switch takes effect; the
    driver is returned.
    """
    xianchang_xpath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.view.ViewGroup/android.widget.FrameLayout[2]/android.view.ViewGroup/android.widget.LinearLayout/android.view.ViewGroup[3]/android.widget.ImageView'
    yanchanghui_xpath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.view.ViewGroup/android.widget.FrameLayout[1]/android.widget.FrameLayout/android.widget.FrameLayout[2]/android.widget.RelativeLayout/android.support.v7.widget.RecyclerView/android.widget.RelativeLayout[2]/android.widget.RelativeLayout/android.widget.ImageView'
    # Find and open the 'Live' (现场) tab
    xianchang_tag = driver.find_element_by_xpath(xianchang_xpath)
    xianchang_tag.click()
    time.sleep(1)
    # Find and open the 'Concert' (演唱会) tab
    yanchanghui_tag = driver.find_element_by_xpath(yanchanghui_xpath)
    yanchanghui_tag.click()
    time.sleep(1)
    # Open the search field
    driver.find_element_by_id('uikit_topbar_right_view').click()
    # Type the environment-switch magic string, then press enter
    change_list = ['*#testrs#*', '*#prs#*', '*#rs#*', '*#devrs#*']
    change_key = change_list[num]
    driver.find_element_by_id('edit_txt_search_bar_input').send_keys(change_key)
    print('start change env ...')
    '''
    # driver.keyevent(66)
    #driver.press_keycode(66)
    '''
    # Appium's own keyevent/press_keycode (above) do not take effect here,
    # so press enter through PyKeyboard on the host instead.
    k = PyKeyboard()
    time.sleep(1)
    k.tap_key(k.enter_key)
    print('end change env ...')
    # Close and relaunch the app so the new environment takes effect
    time.sleep(2)
    driver.close_app()
    time.sleep(5)
    driver.launch_app()
    return driver
# 读取excel数据
def read_excel():
    """Load (category, query) pairs from the test spreadsheet.

    Reads sheet 'test1' of ./咪咕音乐效果测试.xls; column 2 holds the category
    label and column 3 the search query (the header row is skipped).
    Returns a list of (category, query) tuples.
    """
    work_book = xlrd.open_workbook('./咪咕音乐效果测试.xls')
    sheet = work_book.sheet_by_name('test1')
    print('sheet_name:{},sheet_rows:{},sheet_cols:{}'.format(sheet.name, sheet.nrows, sheet.ncols))
    type_list = sheet.col_values(2)[1:]
    song_list = sheet.col_values(3)[1:]
    # Pair each category with its query string. (The original code built the
    # same list a second time via cell_value and discarded it — removed.)
    return list(zip(type_list, song_list))
# 数据查找、截图
def search_song(driver):
    """Search every (category, query) pair from the spreadsheet in Migu Music
    and capture a screenshot of the relevant result tab for inspection.

    The category text decides which result tab is opened and whether the tab
    bar must be swiped first. Screenshots are written to
    ./picture/<row>.<category>.<query>.png; the app is always closed at the
    end of the run (or on failure).
    """
    search_list = read_excel()
    print(search_list)
    # Every result tab shares this xpath; only the Tab[] index differs:
    # 2=album, 3=video, 4=playlist, 5=lyrics/concert/ticket, 6=artist.
    tab_xpath = '/hierarchy/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.LinearLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.RelativeLayout/android.widget.FrameLayout/android.widget.RelativeLayout/android.widget.HorizontalScrollView/android.widget.LinearLayout/android.support.v7.app.ActionBar.Tab[%d]/android.widget.TextView'

    def _normalize(raw_name):
        # Excel hands numeric cells back as floats; render them as plain ints.
        if type(raw_name) is float:
            return str(int(raw_name)).strip()
        return str(raw_name).strip()

    def _swipe_left(times):
        # Swipe the tab bar to the left to reveal tabs beyond the lyrics tab.
        width = driver.get_window_size()['width']
        height = driver.get_window_size()['height']
        time.sleep(0.2)
        start_x = width // 4 * 3
        start_y = height // 2
        distance = width // 2
        end_x = start_x - distance
        end_y = start_y
        for _ in range(times):
            time.sleep(1)
            driver.swipe(start_x, start_y, end_x, end_y)

    def _capture(row, label, category, raw_name,
                 tab_index=None, swipes=0, press_enter=True, name_only=False):
        # One search/screenshot cycle: open search, type the query, optionally
        # submit, optionally switch result tab, screenshot, then navigate back.
        driver.find_element_by_id('music_homepage_search_ll_v7').click()
        time.sleep(2)
        search_name = _normalize(raw_name)
        driver.find_element_by_id('edt_search_input').send_keys(search_name)
        if press_enter:
            driver.keyevent(66)
        if tab_index is not None:
            driver.find_element_by_xpath(tab_xpath % tab_index).click()
        if swipes:
            _swipe_left(swipes)
        print('保存图片:%d,%s,%s' % (row, label, raw_name))
        time.sleep(3)
        if name_only:
            file_name = './picture/' + str(row) + '.' + search_name + '.png'
        else:
            file_name = './picture/' + str(row) + '.' + category + '.' + search_name + '.png'
        driver.get_screenshot_as_file(file_name)
        driver.find_element_by_id('iv_search_input_back').click()
        time.sleep(1)

    try:
        for i, data in enumerate(search_list):
            category = str(data[0]).strip()
            row = i + 2  # spreadsheet row number (header is row 1)
            if category.startswith('歌曲') or category.startswith('组合') or '歌曲别名' in data[0] or category.startswith('语义'):
                _capture(row, data[0], category, data[1])
            elif category.startswith('专辑') or '专辑别名' in data[0] or '歌手+专辑' in data[0]:
                _capture(row, data[0], category, data[1], tab_index=2)
            elif category.startswith('MV') or category.startswith('mv') or '视频' == data[0] or 'mv别名' in data[0] or 'MV别名' in data[0]:
                _capture(row, data[0], category, data[1], tab_index=3)
            elif category.startswith('歌单'):
                _capture(row, data[0], category, data[1], tab_index=4)
            elif category.startswith('歌词'):
                _capture(row, data[0], category, data[1], tab_index=5)
            elif (category.startswith('歌手') or category.endswith('歌手')) and '歌手+' not in data[0]:
                _capture(row, data[0], category, data[1], tab_index=6)
            elif category.startswith('演唱会'):
                # Concert results live two swipes past the lyrics tab.
                _capture(row, data[0], category, data[1], tab_index=5, swipes=2)
            elif category.startswith('票务'):
                # Ticket results live three swipes past the lyrics tab.
                _capture(row, data[0], category, data[1], tab_index=5, swipes=3)
            elif '视频彩铃' in data[0]:
                # Video ring-back tones cannot be captured this way.
                print('没法搞:%d,%s,%s' % (row, data[0], data[1]))
            elif (category.endswith('提示') or category.startswith('提示')) and len(category) < 11:
                # Suggestion rows: screenshot the live suggestion list without
                # submitting the query; file name omits the category.
                _capture(row, data[0], category, data[1], press_enter=False, name_only=True)
            elif len(category) == 0:
                pass
            else:
                _capture(row, data[0], category, data[1])
    finally:
        driver.close_app()
if __name__ == '__main__':
    # Time the full run: start a session, switch environment, run the sweep.
    start_time = time.time()
    driver = android_driver()
    # Environment index: 0 = test, 1 = pre-production, 2 = production, 3 = development
    driver = chage_env(driver, 2)
    time.sleep(5)
    search_song(driver)
    end_time = time.time()
    cast_time = end_time - start_time
    print('cast time :%s' % cast_time)
| StarcoderdataPython |
1848308 | # -*- coding: utf-8 -*-
import os
from peewee import *
from playhouse.pool import PooledPostgresqlDatabase
from src.config import DB_CONNECTION_DB_NAME, DB_CONNECTION_HOST, DB_CONNECTION_USERNAME, DB_CONNECTION_PORT, \
DB_CONNECTION_PASSWORD
__author__ = "zebraxxl"
DB_CONNECTION = PooledPostgresqlDatabase(database=DB_CONNECTION_DB_NAME, host=DB_CONNECTION_HOST,
port=int(DB_CONNECTION_PORT), user=DB_CONNECTION_USERNAME,
password=DB_CONNECTION_PASSWORD)
class BigSerialField(Field):
    """Peewee field mapped to PostgreSQL's auto-incrementing ``bigserial`` type.

    The ``bigserial`` db_field name is registered on the pooled connection
    via ``DB_CONNECTION.register_fields`` immediately below this class.
    """
    db_field = "bigserial"
DB_CONNECTION.register_fields({
"bigserial": "bigserial"
})
class BaseModel(Model):
    """Common base class for all ORM models; binds them to the pooled Postgres DB."""
    class Meta:
        # Every subclass inherits this database binding.
        database = DB_CONNECTION
| StarcoderdataPython |
4870712 | <reponame>ProEgitim/Python-Dersleri-BEM<gh_stars>1-10
"""in operatoru içinde olup olmadıgına bakar"""
liste = [1,2,3,4,5,6,7,8,9]
for eleman in liste:
print("Eleman: ",eleman)
toplam=0
for e in liste:
toplam += e
print("toplam: ",toplam)
for sayi in liste:
if(sayi % 2 == 0):
print(sayi," sayimiz çift")
else:
print(sayi," sayimiz tek")
karekterler="Erdogan"
for item in karekterler:
print(3*item)
"""
while döngüsü
"""
x=0
while(x<10):
print("x in degeri ",x)
x+=1
a=0
while(a<len(liste)):
print("index: ",a," Eleman",liste[a])
a+=1 | StarcoderdataPython |
254286 | <reponame>ikekilinc/Columbus
# Columbus - A Smart Navigation System for the Visually-Impaired
# This is the main file that controls the functionalities of Columbus. This file
# calls others in the main project folder to import and export data as needed to
# operate Columbus and create a cogent user-experience. This file will call
# Columbus' speech engine, node mapper, path finder, and image number reader.
from speech_engine import *
from node_mapper import *
from path_finder import *
"""
import speech_engine as speech_engine
import node_mapper as node_mapper
import path_finder as path_finder
class MainProgram(object):
def __init__(self):
pass
"""
def run():
    """Top-level Columbus flow: ask for a destination, then a start location.

    Path search and spoken directions are not implemented yet (see trailing
    comments).
    """
    # User inputs destination.
    destination = destinationInput()
    # Columbus asks where user is (TEMP).
    # BUG FIX: the original assigned the result to a local named
    # `startLocationInput`, shadowing the function of the same name and
    # raising UnboundLocalError on the call; use a distinct local name.
    startLocation = startLocationInput()
    # Columbus searches for and determines path to destination.
    # Columbus speaks path directions.
def destinationInput():
    """Prompt the user for a destination and confirm it; retries until confirmed.

    Returns:
        The confirmed destination string recognized from speech.
    """
    # Columbus asks where the user would like to go.
    speakText = "Where would you like to go?"
    speakColumbus(speakText)
    # User speaks the destination.
    destination = recognizeSpeech("location")
    # BUG FIX: the confirmation prompt was built (as `speechText`) but never
    # spoken -- cf. startLocationInput, which does speak it.  Speak it here.
    speakText = "Is your destination %s" % destination
    speakColumbus(speakText)
    # User confirms or corrects (if incorrect, repeat destination input).
    confirmation = recognizeSpeech("filter")
    if confirmation == "yes":
        return destination
    else:
        return destinationInput()
def startLocationInput():
    """Ask the user for their current location and confirm it via speech.

    Repeats the whole prompt until the user answers "yes", then returns the
    recognized location string.
    """
    speakColumbus("Where are you now?")
    location = recognizeSpeech("location")
    speakColumbus("Is your current location Wean Hall %s" % location)
    # Anything other than an explicit "yes" restarts the prompt.
    if recognizeSpeech("filter") == "yes":
        return location
    return startLocationInput()
def speakColumbus(speechText):
    # Stub: as written this silently discards `speechText`.  Presumably the
    # real text-to-speech lives in speech_engine (star-imported above) --
    # TODO confirm whether this local stub is meant to shadow it.
    pass
| StarcoderdataPython |
4972446 | """This module manages the automatic generation of new context files based on
a list of new rmaps and a baseline context.
"""
import os.path
import sys
import shutil
import re
import glob
import crds
from crds.core import (rmap, utils, log, cmdline, config)
from . import refactor
# =============================================================================
# Code used by the website to determine affected
def get_update_map(old_pipeline, updated_rmaps):
    """Given the name of a pipeline context, `old_pipeline`, and a list
    of new rmap names, `updated_rmaps`, return the mapping:

        { imap_name : [ updates_for_that_imap, ... ], ... }

    Updated rmaps can be rmap names or strings of the form:

        <instrument>_<filekind>_"n/a"

    e.g. miri_dflat_n/a
    """
    pctx = crds.get_pickled_mapping(old_pipeline)  # reviewed
    updates = {}
    for update in sorted(updated_rmaps):
        if update.endswith(("_n/a","_N/A")):
            # "N/A" pseudo-updates carry instrument/filekind in the name itself.
            instrument, _filekind, na = update.split("_")
        else: # should be an rmap name
            # Derive instrument/filekind from the rmap filename conventions.
            instrument, _filekind = utils.get_file_properties(pctx.observatory, update)
        # Group each update under the instrument context (.imap) it affects.
        imap_name = pctx.get_imap(instrument).filename
        if imap_name not in updates:
            updates[imap_name] = []
        assert update not in updates[imap_name], "Duplicate update for " + repr(update)
        updates[imap_name].append(update)
    return updates
# =============================================================================
# Code used by the website to generate new contexts.
def generate_new_contexts(old_pipeline, updates, new_names):
    """Generate new pipeline and instrument context files given:

    old_pipeline -- name of pipeline mapping
    updates -- { old_imap : [ new_rmaps ], ... }
    new_names -- { old_pmap : new_pmap, old_imaps : new_imaps }

    Returns [new_pipeline] + new_imaps, the names of the generated mappings.
    """
    # Work on a copy: the pipeline entry is popped out of the dict below.
    new_names = dict(new_names)
    # First rewrite each instrument context with its updated rmaps...
    for imap_name in sorted(updates):
        hack_in_new_maps(imap_name, new_names[imap_name], updates[imap_name])
    # ...then rewrite the pipeline context to refer to the new imaps.
    new_pipeline = new_names.pop(old_pipeline)
    new_imaps = list(new_names.values())
    hack_in_new_maps(old_pipeline, new_pipeline, new_imaps)
    return [new_pipeline] + new_imaps
def hack_in_new_maps(old, new, updated_maps):
    """Create a modified copy of mapping `old` named `new`, installing each
    map of `updated_maps` in place of its predecessor.
    """
    copy_mapping(old, new)
    for updated in sorted(updated_maps):
        key, replaced, replacement = insert_mapping(new, updated)
        # The log message tail is identical for the replace and add cases.
        tail = (repr(replacement), "for", repr(key), "in", repr(old),
                "producing", repr(new))
        if replaced:
            log.info("Replaced", repr(replaced), "with", *tail)
        else:
            log.info("Added", *tail)
def insert_mapping(context, mapping):
    """Replace the filename in file `context` with the same generic name
    as `mapping` with `mapping`.  Re-write `context` in place.

    If mapping is of the form <instrument>_<type>_"n/a", then it specifies
    that <type> of <instrument> should be set to "N/A".

    Returns (key, replaced, replacement) where `key` is the selector entry
    that was updated, `replaced` is the previous value (falsy if the entry
    was newly added) and `replacement` is the installed value.
    """
    # 'ACS' : 'hst_acs.imap',
    where = rmap.locate_mapping(context)
    # readonly caching is ok because this call is always made on a newly named
    # copy of the original rmap; the only thing mutated is the uncached new mapping.
    loaded = rmap.asmapping(context, cache="readonly")
    if mapping.endswith(("_n/a", "_N/A")):
        # N/A pseudo-update: the key is the filekind, the value the literal "N/A".
        instrument, key, special = mapping.split("_")
        replacement = special.upper()
    else:
        key = loaded.get_item_key(mapping)
        replacement = mapping
    key, replaced = loaded.set_item(key, replacement)
    loaded.write(where)
    return key, replaced, replacement
def copy_mapping(old_map, new_map):
    """Copy the cached file of mapping `old_map` to the location of `new_map`.

    Refuses to overwrite: the destination file must not already exist.
    """
    destination = rmap.locate_mapping(new_map)
    assert not os.path.exists(destination), "New mapping file " + repr(new_map) + " already exists."
    shutil.copyfile(rmap.locate_mapping(old_map), destination)
# =============================================================================
# Code for making "fake/test" new contexts on the command line.
def new_context(old_pipeline, updated_rmaps):
    """Given a pipeline mapping name `old_pipeline`, and a list of the names
    of new rmaps, `updated_rmaps`, generate new imaps as needed and a single
    pmap which refers to them all.

    Returns { old_name : fake_names }
    """
    # Group the rmap updates by the instrument context each one affects.
    updates = get_update_map(old_pipeline, updated_rmaps)
    # Invent local-only ("fake") replacement names for the pipeline and imaps.
    new_names = generate_fake_names(old_pipeline, updates)
    generate_new_contexts(old_pipeline, updates, new_names)
    return new_names
def generate_fake_names(old_pipeline, updates):
    """Map old pipeline and instrument context names to fresh replacement
    names.  "Fake" names only work locally and may collide with CRDS server
    names, hence would not be submissible.
    """
    renamed = {old_pipeline: fake_name(old_pipeline)}
    for old_imap in sorted(updates):
        renamed[old_imap] = fake_name(old_imap)
    return renamed
def fake_name(old_map):
    """Given an old mapping name, `old_map`, adjust the serial number to
    create a new mapping name of the same series.  This name is fake in the
    sense that it is local to a developer's machine.
    """
    if re.search(r"_\d+", old_map):
        # Name already carries a serial number: find the newest member of the
        # same series in the local cache and bump one past its serial.
        map_glob = re.sub(r"_\d+(\..map)", r"_*\1", old_map)
        same_maps = sorted(glob.glob(config.locate_mapping(map_glob)))
        if same_maps:
            last_map = same_maps[-1]
            match = re.search(r"_(\d+)\..map", last_map)
            serial = int(match.group(1), 10) + 1
            new_map = re.sub(r"_(\d+)(\.[pir]map)", r"_%04d\2" % serial, old_map)
        else:
            # No cached members of the series: keep the name as-is.
            new_map = old_map
    elif re.search(r"\w+[^\d]+\..map", old_map):
        # if no serial, start off existing sequence as 0001
        parts = os.path.splitext(old_map)
        new_map = parts[0] + "_0001" + parts[1]
        new_map = fake_name(new_map)
    else:
        raise ValueError("Unrecognized mapping filename " + repr(old_map))
    if os.path.exists(rmap.locate_mapping(new_map)):
        # recurse until there's a free name, or eventually fail.
        return fake_name(new_map)
    else:
        # Prefix with ./ to mark the result as a local, non-distributed file.
        if not new_map.startswith("./"):
            new_map = "./" + os.path.basename(new_map)
        return new_map
def update_header_names(name_map):
    """Update the .name and .derived_from fields in mapping new_path.header
    to reflect derivation from old_path and name new_path.

    `name_map` maps old mapping paths to new mapping paths; it is returned
    unchanged for caller convenience.
    """
    for old_path, new_path in sorted(name_map.items()):
        old_base, new_base = os.path.basename(old_path), os.path.basename(new_path)
        # Rewrite the new mapping's header in place to record its parentage.
        refactor.update_derivation(new_path, old_base)
        log.info("Adjusting name", repr(new_base), "derived_from", repr(old_base),
                 "in", repr(new_path))
    return name_map  # no change
# ============================================================================
class NewContextScript(cmdline.Script):
    """Defines the command line handler for crds newcontext."""

    description = """Based on `old_pmap`, generate a new .pmap and .imaps as
    needed in order to support `new_rmaps`. Currently generated contexts have
    fake names and are for local test purposes only, not formal distribution.
    """

    def add_args(self):
        """Declare the positional command line arguments."""
        self.add_argument("old_pmap")
        # BUG FIX: the original help string ended with a stray '""' (implicit
        # concatenation with an empty literal); removed -- the resulting
        # string value is unchanged.
        self.add_argument("new_rmap", nargs="+",
                          help="Names of new rmaps to insert into the new context.")

    def main(self):
        """Generate the new contexts, fix up header names, return error count."""
        name_map = new_context(self.args.old_pmap, self.args.new_rmap)
        update_header_names(name_map)
        return log.errors()
if __name__ == "__main__":
sys.exit(NewContextScript()())
| StarcoderdataPython |
1658268 | #verkefni2_12
import re
from fractions import Fraction
def scale(s, ratio):
    """Scale every numeric quantity in recipe text `s` by `ratio`.

    Quantities may be whole numbers ("4") or simple fractions ("1/2");
    `ratio` is a string such as "1/2" or "3".  Each scaled quantity is
    expressed as the simplest fraction (e.g. 0.25 -> "1/4").  The scaled
    text is printed and also returned so callers can use it directly.
    """
    # Matches a plain digit or a digit/digit fraction.
    pattern = r"([0-9]?[r\/]*[0-9])"
    quantities = re.findall(pattern, s)
    scaled = []
    for quantity in quantities:
        # Evaluate "quantity * ratio" numerically.  The matched text is
        # restricted to digits and slashes by the regex, so eval is confined.
        value = eval(quantity + '*' + ratio)
        scaled.append(str(Fraction(value).limit_denominator()))
    # Substitute each original quantity with its scaled counterpart, in order.
    # (Removed: dead commented-out code and a debug print of the raw list.)
    formatted = re.sub(pattern, lambda match: scaled.pop(0), s)
    print(formatted)
    return formatted
scale('''Ingredients
4 skinless, boneless chicken thighs
1/2 cup soy sauce
1/2 cup ketchup
1/3 cup honey
3 cloves garlic, minced
1 teaspoon dried basil''', '1/2') | StarcoderdataPython |
1881639 | <gh_stars>0
import unittest
from app.models import Comment
def setUp(self):
    # Fixture: a Comment with known field values, checked by the tests below.
    self.new_comment = Comment(id=2, pitch_id=100, pitch_comment='Test Comment', user_id=11)
def tearDown(self):
    # Clean up: remove every comment persisted during the test.
    Comment.query.delete()
def test_check_instance_variables(self):
    """Verify that a freshly constructed Comment keeps its constructor args."""
    self.assertEquals(self.new_comment.id, 2)
    # BUG FIX: was `assertEquals(self.new_comment.pitch_id=100)`, which is a
    # SyntaxError (keyword argument whose target is an attribute); compare
    # the two values positionally instead.
    self.assertEquals(self.new_comment.pitch_id, 100)
    # BUG FIX: setUp stores 'Test Comment' (capital C); the old expected
    # value 'Test comment' could never match.
    self.assertEquals(self.new_comment.pitch_comment, 'Test Comment')
    self.assertEquals(self.new_comment.user_id, 11)
def test_comment_save(self):
    # Saving the fixture comment should persist at least one row.
    self.new_comment.save_comment()
    self.assertTrue(len(Comment.query.all()) > 0)
def test_get_comment_by_id(self):
self.new_comment.save_comment()
got_comments = Comment.get_comments(2) | StarcoderdataPython |
6580373 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright © 2019 <NAME>
""" Interactive layout figure with paraxial editing
.. Created on Thu Mar 14 10:20:33 2019
.. codeauthor: <NAME>
"""
import numpy as np
from rayoptics.gui.util import bbox_from_poly
from rayoptics.mpl.interactivefigure import InteractiveFigure
from rayoptics.elem.layout import LensLayout
class InteractiveLayout(InteractiveFigure):
""" Editable version of optical system layout, aka Live Layout
Attributes:
opt_model: parent optical model
refresh_gui: function to be called on refresh_gui event
offset_factor: how much to draw rays before first surface
do_draw_rays: if True, draw edge rays
do_paraxial_layout: if True, draw editable paraxial axial and chief ray
"""
def __init__(self, opt_model, refresh_gui=None,
offset_factor=0.05,
do_draw_rays=True,
do_draw_beams=True,
do_draw_edge_rays=True,
do_draw_ray_fans=False,
num_rays_in_fan=11,
do_paraxial_layout=False,
**kwargs):
self.refresh_gui = refresh_gui
is_dark = kwargs['is_dark'] if 'is_dark' in kwargs else False
self.layout = LensLayout(opt_model, is_dark=is_dark)
if do_draw_rays:
self.do_draw_beams = do_draw_beams
self.do_draw_edge_rays = do_draw_rays
else:
self.do_draw_beams = False
self.do_draw_edge_rays = False
self.do_draw_edge_rays = do_draw_edge_rays
self.do_draw_ray_fans = do_draw_ray_fans
self.num_rays_in_fan = num_rays_in_fan
self.do_paraxial_layout = do_paraxial_layout
self.offset_factor = offset_factor
super().__init__(**kwargs)
def sync_light_or_dark(self, is_dark, **kwargs):
self.layout.sync_light_or_dark(is_dark)
super().sync_light_or_dark(is_dark, **kwargs)
def update_data(self, **kwargs):
self.artists = []
concat_bbox = []
layout = self.layout
self.ele_shapes = layout.create_element_entities(self)
self.ele_bbox = self.update_patches(self.ele_shapes)
concat_bbox.append(self.ele_bbox)
if self.do_draw_beams or self.do_draw_edge_rays or self.do_draw_ray_fans:
sl_so = layout.system_length(self.ele_bbox,
offset_factor=self.offset_factor)
system_length, start_offset = sl_so
if self.do_draw_beams or self.do_draw_edge_rays:
self.ray_shapes = layout.create_ray_entities(self, start_offset)
self.ray_bbox = self.update_patches(self.ray_shapes)
concat_bbox.append(self.ray_bbox)
if self.do_draw_ray_fans:
self.rayfan_shapes = layout.create_ray_fan_entities(
self, start_offset,
num_rays=self.num_rays_in_fan
)
self.rayfan_bbox = self.update_patches(self.rayfan_shapes)
concat_bbox.append(self.rayfan_bbox)
if self.do_paraxial_layout:
self.parax_shapes = layout.create_paraxial_ray_entities(self)
self.parax_bbox = self.update_patches(self.parax_shapes)
concat_bbox.append(self.parax_bbox)
sys_bbox = np.concatenate(concat_bbox)
self.sys_bbox = bbox_from_poly(sys_bbox)
return self
def action_complete(self):
super().action_complete()
self.do_action = self.do_shape_action
def fit_axis_limits(self):
return self.sys_bbox
| StarcoderdataPython |
3412500 | <reponame>CREATE-CHENG/skatelife
from django.contrib import admin
from .models import User
from imagekit.admin import AdminThumbnail
class AvatarAdmin(admin.ModelAdmin):
list_display = ('__str__', 'admin_thumbnail')
admin_thumbnail = AdminThumbnail(image_field='avatar_thumbnail')
admin.site.register(User, AvatarAdmin)
| StarcoderdataPython |
11226719 | <filename>tests/test_api.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import numpy as np
from funcat import *
def test_000001():
    """Spot-check funcat indicator values for the Shanghai index (000001.XSHG).

    Uses the tushare data backend with the evaluation date pinned to
    2016-12-16 so the expected values are reproducible.
    """
    from funcat.data.tushare_backend import TushareDataBackend
    set_data_backend(TushareDataBackend())
    # Fix the evaluation date and the security under test.
    T("20161216")
    S("000001.XSHG")
    assert np.equal(round(CLOSE.value, 2), 3122.98)
    # Indexing an indicator with [n] looks back n bars.
    assert np.equal(round(OPEN[2].value, 2), 3149.38)
    assert np.equal(round((CLOSE - OPEN).value, 2), 11.47)
    assert np.equal(round((CLOSE - OPEN)[2].value, 2), -8.85)
    # Daily percentage change relative to the previous close.
    assert np.equal(round(((CLOSE / CLOSE[1] - 1) * 100).value, 2), 0.17)
    assert np.equal(round(MA(CLOSE, 60)[2].value, 2), 3131.08)
    assert np.equal(round(MACD().value, 2), -37.18)
    # Highest high / lowest low over the last 5 bars.
    assert np.equal(round(HHV(HIGH, 5).value, 2), 3245.09)
    assert np.equal(round(LLV(LOW, 5).value, 2), 3100.91)
    assert COUNT(CLOSE > OPEN, 5) == 2
1676265 | # Status: ported
# Base revision: 64488
#
# Copyright (c) 2005, 2010 <NAME>.
# Copyright 2006 <NAME>.
#
# Use, modification and distribution is subject to the Boost Software
# License Version 1.0. (See accompanying file LICENSE.txt or
# https://www.bfgroup.xyz/b2/LICENSE.txt)
# Provides mechanism for installing whole packages into a specific directory
# structure. This is opposed to the 'install' rule, that installs a number of
# targets to a single directory, and does not care about directory structure at
# all.
# Example usage:
#
# package.install boost : <properties>
# : <binaries>
# : <libraries>
# : <headers>
# ;
#
# This will install binaries, libraries and headers to the 'proper' location,
# given by command line options --prefix, --exec-prefix, --bindir, --libdir and
# --includedir.
#
# The rule is just a convenient wrapper, avoiding the need to define several
# 'install' targets.
#
# The only install-related feature is <install-source-root>. It will apply to
# headers only and if present, paths of headers relatively to source root will
# be retained after installing. If it is not specified, then "." is assumed, so
# relative paths in headers are always preserved.
import b2.build.feature as feature
import b2.build.property as property
import b2.util.option as option
import b2.tools.stage as stage
from b2.build.alias import alias
from b2.manager import get_manager
from b2.util import bjam_signature
from b2.util.utility import ungrist
import os
feature.feature("install-default-prefix", [], ["free", "incidental"])
@bjam_signature((["name", "package_name", "?"], ["requirements", "*"],
["binaries", "*"], ["libraries", "*"], ["headers", "*"]))
def install(name, package_name=None, requirements=[], binaries=[], libraries=[], headers=[]):
requirements = requirements[:]
binaries = binaries[:]
libraries
if not package_name:
package_name = name
if option.get("prefix"):
# If --prefix is explicitly specified on the command line,
# then we need wipe away any settings of libdir/includir that
# is specified via options in config files.
option.set("bindir", None)
option.set("libdir", None)
option.set("includedir", None)
# If <install-source-root> is not specified, all headers are installed to
# prefix/include, no matter what their relative path is. Sometimes that is
# what is needed.
install_source_root = property.select('install-source-root', requirements)
if install_source_root:
requirements = property.change(requirements, 'install-source-root', None)
install_header_subdir = property.select('install-header-subdir', requirements)
if install_header_subdir:
install_header_subdir = ungrist(install_header_subdir[0])
requirements = property.change(requirements, 'install-header-subdir', None)
# First, figure out all locations. Use the default if no prefix option
# given.
prefix = get_prefix(name, requirements)
# Architecture dependent files.
exec_locate = option.get("exec-prefix", prefix)
# Binaries.
bin_locate = option.get("bindir", os.path.join(prefix, "bin"))
# Object code libraries.
lib_locate = option.get("libdir", os.path.join(prefix, "lib"))
# Source header files.
include_locate = option.get("includedir", os.path.join(prefix, "include"))
stage.install(name + "-bin", binaries, requirements + ["<location>" + bin_locate])
alias(name + "-lib", [name + "-lib-shared", name + "-lib-static"])
# Since the install location of shared libraries differs on universe
# and cygwin, use target alternatives to make different targets.
# We should have used indirection conditioanl requirements, but it's
# awkward to pass bin-locate and lib-locate from there to another rule.
alias(name + "-lib-shared", [name + "-lib-shared-universe"])
alias(name + "-lib-shared", [name + "-lib-shared-cygwin"], ["<target-os>cygwin"])
# For shared libraries, we install both explicitly specified one and the
# shared libraries that the installed executables depend on.
stage.install(name + "-lib-shared-universe", binaries + libraries,
requirements + ["<location>" + lib_locate, "<install-dependencies>on",
"<install-type>SHARED_LIB"])
stage.install(name + "-lib-shared-cygwin", binaries + libraries,
requirements + ["<location>" + bin_locate, "<install-dependencies>on",
"<install-type>SHARED_LIB"])
# For static libraries, we do not care about executable dependencies, since
# static libraries are already incorporated into them.
stage.install(name + "-lib-static", libraries, requirements +
["<location>" + lib_locate, "<install-dependencies>on", "<install-type>STATIC_LIB"])
stage.install(name + "-headers", headers, requirements \
+ ["<location>" + os.path.join(include_locate, s) for s in install_header_subdir]
+ install_source_root)
alias(name, [name + "-bin", name + "-lib", name + "-headers"])
pt = get_manager().projects().current()
for subname in ["bin", "lib", "headers", "lib-shared", "lib-static", "lib-shared-universe", "lib-shared-cygwin"]:
pt.mark_targets_as_explicit([name + "-" + subname])
@bjam_signature((["target_name"], ["package_name"], ["data", "*"], ["requirements", "*"]))
def install_data(target_name, package_name, data, requirements):
if not package_name:
package_name = target_name
if option.get("prefix"):
# If --prefix is explicitly specified on the command line,
# then we need wipe away any settings of datarootdir
option.set("datarootdir", None)
prefix = get_prefix(package_name, requirements)
datadir = option.get("datarootdir", os.path.join(prefix, "share"))
stage.install(target_name, data,
requirements + ["<location>" + os.path.join(datadir, package_name)])
get_manager().projects().current().mark_targets_as_explicit([target_name])
def get_prefix(package_name, requirements):
    """Resolve the install prefix: the --prefix option wins, then the
    project's <install-default-prefix> requirement, then a platform default
    (C:\\<package_name> on Windows, /usr/local on POSIX).
    """
    specified = property.select("install-default-prefix", requirements)
    if specified:
        specified = ungrist(specified[0])
    prefix = option.get("prefix", specified)
    # NOTE(review): this rebinding appears dead -- `requirements` is not read
    # again and the changed list is not returned to the caller.  Presumably a
    # leftover from the Jam original; TODO confirm.
    requirements = property.change(requirements, "install-default-prefix", None)
    # Or some likely defaults if neither is given.
    if not prefix:
        if os.name == "nt":
            prefix = "C:\\" + package_name
        elif os.name == "posix":
            prefix = "/usr/local"
    return prefix
| StarcoderdataPython |
5040601 | <filename>conanfile.py
from conans import ConanFile, CMake
from os import getcwd
class Domains(ConanFile):
    """Conan package recipe for the `domains` library."""
    name = "domains"
    version = "0.0.1"
    url = "https://github.com/skizzay/domains.git"
    settings = "os", "compiler", "build_type", "arch"
    generators = "cmake", "txt", "env", "ycm"
    exports = "CMakeLists.txt", "domains/*"
    #requires = 'range-v3/latest@ericniebler/stable', 'frozen/0.1@serge-sans-paille/testing'
    requires = 'range-v3/0.3.0@ericniebler/stable'
    dev_requires = 'catch/1.5.0@TyRoXx/stable', 'kerchow/1.0.1@skizzay/stable'

    def build(self):
        """Configure and build with CMake; run the test suite in dev scope."""
        self.output.info(str(self.settings.compiler.version))
        cmake = CMake(self.settings)
        self._execute("cmake %s %s %s" % (self.conanfile_directory, cmake.command_line, self._build_tests))
        self._execute("cmake --build %s %s" % (getcwd(), cmake.build_config))
        if self.scope.dev:
            self.run("ctest")

    @property
    def _build_tests(self):
        # Enable the BUILD_TESTS CMake option only for development builds.
        if self.scope.dev:
            return "-DBUILD_TESTS=1"
        return ""

    def _execute(self, command):
        # Log each shell command before running it.
        self.output.info(command)
        self.run(command)
| StarcoderdataPython |
6596720 | from aws_cdk import cdk
class %name.PascalCased%Stack(cdk.Stack):
def __init__(self, app: cdk.App, id: str, **kwargs) -> None:
super().__init__(app, id)
# The code that defines your stack goes here
| StarcoderdataPython |
1770464 | <gh_stars>10-100
from pyzbar import pyzbar
from PIL import Image
def decode_qr_code(img):
    """Decode a single QR code from a PIL image.

    Returns the UTF-8 text payload, or the string "decode fail" when no
    QR code is found.
    """
    results = pyzbar.decode(img, symbols=[pyzbar.ZBarSymbol.QRCODE])
    if not results:
        return "decode fail"
    return results[0].data.decode("utf-8")
# Render a 25x25 QR module matrix (with a 1-pixel white quiet border, hence
# the 27x27 canvas) from the "0"/"1" characters stored in qrcode.txt, then
# decode it back to text.
qrcode = Image.new('RGB', (27, 27), color=(255, 255, 255))
# BUG FIX: the file handle was opened without ever being closed; use a
# context manager so it is released deterministically.
with open("qrcode.txt") as f:
    txt = f.read()
x = y = 1
for i in txt:
    # "1" marks a dark module, anything else a light one.
    color = 0 if i == "1" else 255
    qrcode.putpixel((x, y), (color, color, color))
    x += 1
    if x > 25:
        # Wrap to the start of the next row after 25 modules.
        x = 1
        y += 1
# qrcode.show()
print(decode_qr_code(qrcode))
| StarcoderdataPython |
5025801 | <reponame>ttumiel/interpret
"Basic attribution that uses the gradient as the attribution map"
from .attribute import Attribute
class Gradient(Attribute):
    """Uses the gradient of the network with respect to a target class to
    create an attribution map.
    """
    def __init__(self, model, input_img, target_class):
        # The input must participate in autograd and be a single example.
        assert input_img.requires_grad, "Input image must require_grad"
        assert input_img.size(0) == 1, "Input image must have batch size of 1"
        self.m = model.eval()  # eval() switches off train-time behavior
        self.target_class = target_class
        self.input_data = input_img
        # Compute the attribution eagerly; result is stored in self.data.
        self.calculate_gradient()

    def calculate_gradient(self):
        """Backprop the target-class score and store the input gradient in self.data."""
        # Clear any stale gradient left over from a previous backward pass.
        if self.input_data.grad is not None:
            self.input_data.grad.fill_(0)
        # Score of the target class for the single example in the batch.
        loss = self.m(self.input_data)[0, self.target_class]
        loss.backward()
        # Detach and clone so self.data is independent of the autograd graph.
        self.data = self.input_data.grad.detach().clone().squeeze()
| StarcoderdataPython |
1983276 | <reponame>timcera/tsgettoolbox<filename>src/tsgettoolbox/ulmo/usgs/nwis/__init__.py
# -*- coding: utf-8 -*-
"""
`USGS National Water Information System`_ web services
.. _USGS National Water Information System: http://waterdata.usgs.gov/nwis
"""
from __future__ import absolute_import
from tsgettoolbox.ulmo import util
from . import core
from .core import get_site_data, get_sites
try:
from . import hdf5
except ImportError:
hdf5 = util.module_with_dependency_errors(
[
"get_site",
"get_sites",
"get_site_data",
"update_site_list",
"update_site_data",
]
)
pytables = util.module_with_deprecation_warnings(
[
hdf5.get_site,
hdf5.get_sites,
hdf5.get_site_data,
hdf5.update_site_list,
hdf5.update_site_data,
],
"the nwis.pytables module has moved to nwis.hdf5 - nwis.pytables "
"is deprecated and will be removed in a future ulmo release.",
)
| StarcoderdataPython |
3425620 | """
Base class for Infinite Gaussian mixture model (IGMM)
Date: 2017
"""
from numpy.linalg import cholesky, det, inv, slogdet
from scipy.misc import logsumexp
from scipy.special import gammaln
import logging
import math
import numpy as np
import time
from scipy import stats
import copy
import matplotlib.pyplot as plt
from ..gaussian.gaussian_components import GaussianComponents
from ..gaussian.gaussian_components_diag import GaussianComponentsDiag
from ..gaussian.gaussian_components_fixedvar import GaussianComponentsFixedVar
from ..utils import utils
from ..utils.plot_utils import plot_ellipse, plot_mixture_model
from ..gmm import GMM
logger = logging.getLogger(__name__)
#-----------------------------------------------------------------------------#
# IGMM CLASS #
#-----------------------------------------------------------------------------#
class IGMM(GMM):
"""
An infinite Gaussian mixture model (IGMM).
See `GaussianComponents` for an overview of the parameters not mentioned
below.
Parameters
----------
alpha : float
Concentration parameter for the Dirichlet process.
assignments : vector of int or str
If vector of int, this gives the initial component assignments. The
vector should therefore have N entries between 0 and `K`. Values of
-1 is also allowed, indicating that the data vector does not belong to
any component. Alternatively, `assignments` can take one of the
following values:
- "rand": Vectors are assigned randomly to one of `K` components.
- "one-by-one": Vectors are assigned one at a time; the value of
`K` becomes irrelevant.
- "each-in-own": Each vector is assigned to a component of its own.
K : int
The initial number of mixture components: this is only used when
`assignments` is "rand".
covariance_type : str
String describing the type of covariance parameters to use. Must be
one of "full", "diag" or "fixed".
"""
def __init__(
        self, X, kernel_prior, alpha, save_path, assignments="rand", K=1, K_max=None,
        covariance_type="full"
        ):
    # Purpose: initialize the IGMM state -- data dimensions, DP concentration,
    # initial component assignments, and the Gaussian components object.
    super(IGMM, self).__init__()
    data_shape = X.shape
    if len(data_shape) < 2:
        raise ValueError('X must be at least a 2-dimensional array.')
    self.save_path = save_path
    self.alpha = alpha
    self.N, self.D = X.shape

    # Initial component assignments
    if assignments == "rand":
        assignments = np.random.randint(0, K, self.N)
        # Make sure we have consecutive values (no empty component labels).
        # NOTE(review): nesting reconstructed -- indentation was lost in this
        # copy; the break is taken to sit inside the for, after the while.
        for k in xrange(assignments.max()):
            while len(np.nonzero(assignments == k)[0]) == 0:
                assignments[np.where(assignments > k)] -= 1
            if assignments.max() == k:
                break
    elif assignments == "one-by-one":
        # All vectors start unassigned (-1) except the first.
        assignments = -1*np.ones(self.N, dtype="int")
        assignments[0] = 0  # first data vector belongs to first component
    elif assignments == "each-in-own":
        assignments = np.arange(self.N)
    else:
        # assignments is a vector
        pass

    # Choose the covariance parameterization of the Gaussian components.
    if covariance_type == "full":
        self.components = GaussianComponents(X, kernel_prior, assignments, K_max)
    elif covariance_type == "diag":
        self.components = GaussianComponentsDiag(X, kernel_prior, assignments, K_max)
    elif covariance_type == "fixed":
        self.components = GaussianComponentsFixedVar(X, kernel_prior, assignments, K_max)
    else:
        assert False, "Invalid covariance type."
def setup_distribution_dict(self, num_saved):
    """
    Build an empty record of per-cluster posterior summaries.
    :param num_saved: number of clusters whose history will be recorded
    :return: dict with "mean"/"variance"/"weights" arrays of shape (num_saved, 0)
    """
    def empty_history():
        # One row per saved cluster; columns are appended per iteration later.
        return np.zeros(shape=(num_saved, 0))
    return {
        "mean": empty_history(),
        "variance": empty_history(),
        "weights": empty_history(),
        }
def update_distribution_dict(self, distribution_dict, weight_first):
    """
    Append the current iteration's cluster summaries to the distribution
    dictionary (and, for 2-D data, save diagnostic plots to save_path).
    :param distribution_dict: distribution dictionary from setup_distribution_dict
    :param weight_first: if True, order clusters by weight; otherwise by mean
    :return: the updated distribution dictionary
    """
    ### if dimension = 2; save plot for ellipses in the save_path
    if self.D == 2:
        ## Plot results
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plot_mixture_model(ax, self)
        plt.savefig(self.save_path + '/model.png')
        plt.savefig(self.save_path + '/model.pdf')
        # Overlay one covariance ellipse per current component.
        for k in xrange(self.components.K):
            mu, sigma = self.components.map(k)
            plot_ellipse(ax, mu, sigma)
        # plt.show()
        plt.savefig(self.save_path + '/ellipse.png')
        plt.savefig(self.save_path + '/ellipse.pdf')
    ## get mean, sd and weights (MAP estimates per component)
    means = []
    sds = []
    for k in xrange(self.components.K):
        mu, sigma = self.components.map(k)
        means.append(mu)
        sds.append(sigma)
    if weight_first:
        ## label switching index: sort clusters by sampled weight
        weights = self.gibbs_weight()
        idx = np.argsort(weights)
        sds = np.array(sds).flatten()
        means = np.array(means).flatten()
        # weights = self.gibbs_weight()
        # label switching: reorder all summaries consistently by idx
        # (label_switch is defined elsewhere in this class -- presumably a
        # permutation by idx; TODO confirm)
        means = self.label_switch(idx, means)
        sds = self.label_switch(idx, sds)
        weights = self.label_switch(idx, weights)
    else:
        ## label switching index: sort clusters by mean instead
        means = np.array(means).flatten()
        idx = np.argsort(means)
        sds = np.array(sds).flatten()
        weights = self.gibbs_weight()
        # label switching
        means = self.label_switch(idx, means)
        sds = self.label_switch(idx, sds)
        weights = self.label_switch(idx, weights)
    # back up for next iteration
    self.old_mean = means
    self.old_sigma = sds
    # Append this iteration as a new column in each history array.
    means = means.reshape((means.shape[0], 1))
    sds = sds.reshape((sds.shape[0], 1))
    weights = weights.reshape((weights.shape[0], 1))
    distribution_dict["mean"] = np.hstack((distribution_dict["mean"], means))
    distribution_dict["variance"] = np.hstack((distribution_dict["variance"], sds))
    distribution_dict["weights"] = np.hstack((distribution_dict["weights"], weights))
    return distribution_dict
def log_marg(self):
    """Return log marginal of data and component assignments: p(X, z)"""
    # Log probability of component assignment P(z|alpha)
    # Equation (10) in Wood and Black, 2008
    # Use \Gamma(n) = (n - 1)!
    facts_ = gammaln(self.components.counts[:self.components.K])
    facts_[self.components.counts[:self.components.K] == 0] = 0  # definition of log(0!)
    log_prob_z = (
        (self.components.K - 1)*math.log(self.alpha) + gammaln(self.alpha)
        - gammaln(np.sum(self.components.counts[:self.components.K])
        + self.alpha) + np.sum(facts_)
        )
    # Likelihood term: p(X | z), delegated to the components object.
    log_prob_X_given_z = self.components.log_marg()
    return log_prob_z + log_prob_X_given_z
def gibbs_weight(self):
"""
Get weight vector for each gibbs iteration
:return: weight vector
"""
Nk = self.components.counts[:self.components.K].tolist()
alpha = [Nk[cid] + self.alpha / self.components.K
for cid in range(self.components.K)]
return stats.dirichlet(alpha).rvs(size=1).flatten()
# # @profile
# def gibbs_sample(self, n_iter, _true_assignment, n_print=20):
# """
# Perform `n_iter` iterations Gibbs sampling on the IGMM.
#
# A record dict is constructed over the iterations, which contains
# several fields describing the sampling process. Each field is described
# by its key and statistics are given in a list which covers the Gibbs
# sampling iterations. This dict is returned.
# """
#
# # Setup record dictionary
# record_dict = {}
# record_dict["sample_time"] = []
# start_time = time.time()
# record_dict["log_marg"] = []
# record_dict["components"] = []
# record_dict["nmi"] = []
# record_dict["mi"] = []
# record_dict["nk"] = []
#
# # Loop over iterations
# for i_iter in range(n_iter):
#
# # Loop over data items
# # import random
# # permuted = range(self.components.N)
# # random.shuffle(permuted)
# # for i in permuted:
# for i in xrange(self.components.N):
#
# # Cache some old values for possible future use
# k_old = self.components.assignments[i]
# K_old = self.components.K
# stats_old = self.components.cache_component_stats(k_old)
#
# # Remove data vector `X[i]` from its current component
# self.components.del_item(i)
#
# # Compute log probability of `X[i]` belonging to each component
# log_prob_z = np.zeros(self.components.K + 1, np.float)
# # (25.35) in Murphy, p. 886
# log_prob_z[:self.components.K] = np.log(self.components.counts[:self.components.K])
# # (25.33) in Murphy, p. 886
# log_prob_z[:self.components.K] += self.components.log_post_pred(i)
# # Add one component to which nothing has been assigned
# log_prob_z[-1] = math.log(self.alpha) + self.components.cached_log_prior[i]
# prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))
#
# # Sample the new component assignment for `X[i]`
# k = utils.draw(prob_z)
# # logger.debug("Sampled k = " + str(k) + " from " + str(prob_z) + ".")
#
# # Add data item X[i] into its component `k`
# if k == k_old and self.components.K == K_old:
# # Assignment same and no components have been removed
# self.components.restore_component_from_stats(k_old, *stats_old)
# self.components.assignments[i] = k_old
# else:
# # Add data item X[i] into its new component `k`
# self.components.add_item(i, k)
#
# # Update record
# record_dict["sample_time"].append(time.time() - start_time)
# start_time = time.time()
# record_dict["log_marg"].append(self.log_marg())
# record_dict["components"].append(self.components.K)
# nmi = normalized_mutual_info_score(_true_assignment, self.components.assignments)
# record_dict["nmi"].append(nmi)
# mi = mutual_info_score(_true_assignment, self.components.assignments)
# record_dict["mi"].append(mi)
# record_dict["nk"].append(self.components.counts[:self.components.K])
#
# # Log info
# info = "iteration: " + str(i_iter)
# for key in sorted(record_dict):
# info += ", " + key + ": " + str(record_dict[key][-1])
# # info += ", nmi: " + str(nmi)
# info += "."
# logger.info(info)
#
# return record_dict
# def approx_sampling(self, n_iter, _true_assignment, approx_thres_perct=0.04, approx_burnin=200, num_saved=3):
#
# return self.constrained_gibbs_sample(n_iter, _true_assignment,
# flag_constrain=False, n_constrain=1000000, thres=0,
# flag_power=False, n_power=1, power_burnin=1000000,
# flag_loss=False, n_loss_step=1000000, flag_marg=True, loss_burnin=1000000,
# flag_approx=True, approx_thres_perct=approx_thres_perct,
# approx_burnin=approx_burnin,
# num_saved=num_saved,)
#
# def ada_pcrp_sampling(self, n_iter, _true_assignment, r_up=1.1, adapcrp_perct=0.04, adapcrp_burnin=500,
# num_saved=3):
# return self.constrained_gibbs_sample(n_iter, _true_assignment,
# flag_constrain=False, n_constrain=1000000, thres=0,
# flag_power=False, n_power=1, power_burnin=1000000,
# flag_loss=False, n_loss_step=1000000, flag_marg=True, loss_burnin=1000000,
# flag_approx=False, approx_thres_perct=0,
# approx_burnin=1000000,
# flag_adapcrp=True, r_up=r_up, adapcrp_perct=adapcrp_perct,
# adapcrp_burnin=adapcrp_burnin,
# num_saved=num_saved,)
#
# def ada_pcrp_sampling_form2(self, n_iter, _true_assignment, r_up=1.1, adapcrp_perct=0.04, adapcrp_burnin=500,
# num_saved=3):
# return self.constrained_gibbs_sample(n_iter, _true_assignment,
# flag_constrain=False, n_constrain=1000000, thres=0,
# flag_power=False, n_power=1, power_burnin=1000000,
# flag_loss=False, n_loss_step=1000000, flag_marg=True, loss_burnin=1000000,
# flag_approx=False, approx_thres_perct=0,
# approx_burnin=1000000,
# flag_adapcrp=False, r_up=r_up, adapcrp_perct=adapcrp_perct,
# adapcrp_burnin=adapcrp_burnin,
# flag_adapcrp_form2=True,
# num_saved=num_saved,)
#
# def loss_ada_pcrp_sampling(self, n_iter, _true_assignment, r_up=1.2, adapcrp_step=0.01, adapcrp_burnin=500,
# num_saved=3):
# return self.constrained_gibbs_sample(n_iter, _true_assignment,
# flag_constrain=False, n_constrain=1000000, thres=0.,
# flag_power=False, n_power=1, power_burnin=100000,
# flag_loss=False, n_loss_step=1000000, flag_marg=True, loss_burnin=10000000,
# flag_approx=False, approx_thres_perct=0., approx_burnin=1000000,
# flag_adapcrp=False, r_up=1., adapcrp_perct=0., adapcrp_burnin=1000000,
# flag_adapcrp_form2=False,
# flag_loss_adapcrp=True, r_up_losspcrp=r_up, lossadapcrp_step=adapcrp_step,
# lossadapcrp_burnin=adapcrp_burnin,
# num_saved=num_saved, weight_first=True)
# # @profile
# def constrained_gibbs_sample(self, n_iter, true_assignments,
# flag_constrain=False, n_constrain=1000000, thres=0.,
# flag_power=False, n_power=1, power_burnin=100000,
# flag_loss=False, n_loss_step=1000000, flag_marg=True, loss_burnin=10000000,
# flag_approx=False, approx_thres_perct=0., approx_burnin=1000000,
# flag_adapcrp=False, r_up=1., adapcrp_perct=0., adapcrp_burnin=1000000,
# flag_adapcrp_form2=False,
# flag_loss_adapcrp=False, r_up_losspcrp=1., lossadapcrp_step=0.,
# lossadapcrp_burnin=1000000,
# num_saved=3, weight_first=True):
# """
# Perform `n_iter` iterations Gibbs sampling on the IGMM.
#
# A record dict is constructed over the iterations, which contains
# several fields describing the sampling process. Each field is described
# by its key and statistics are given in a list which covers the Gibbs
# sampling iterations. This dict is returned.
# """
#
# # Setup record dictionary
# record_dict = self.setup_record_dict()
# start_time = time.time()
# distribution_dict = {}
#
# # record_dict["sample_time"] = []
# # start_time = time.time()
# # record_dict["log_marg"] = []
# # record_dict["components"] = []
# # record_dict["nmi"] = []
# # record_dict["mi"] = []
# # record_dict["nk"] = []
# # record_dict["loss"] = []
# # record_dict['bic'] = []
# # record_dict["vi"] = []
#
# distribution_dict["mean"] = np.zeros(shape=(num_saved, 0))
# distribution_dict["variance"] = np.zeros(shape=(num_saved, 0))
# distribution_dict["weights"] = np.zeros(shape=(num_saved, 0))
#
# dist_idx = 0
#
# constrain_thres = self.components.N * thres
#
#
#
# if flag_loss_adapcrp:
# smallest_loss_adapcrp = utils.cluster_loss_inertia(self.components.X, self.components.assignments)
# r_lossadapcrp = 1. ## initial power
#
# all_noise_data = []
#
# # Loop over iterations
# for i_iter in range(n_iter):
# # print 'iter: {}'.format(i_iter)
#
# isNoiseAnalysis = False
# if isNoiseAnalysis:
# # logging.info('clusters:{}'.format(self.components.counts[:self.components.K]))
# small_cluster_idx = np.where(self.components.counts[:self.components.K]<=1)[0]
# # logging.info('less than 2:{}'.format(small_cluster_idx))
# # logging.info('assignments: {}'.format(collections.Counter(self.components.assignments)))
#
# data_idx = [i for i,row in enumerate(self.components.assignments) if row in small_cluster_idx]
# logging.info("data idx:{}".format(data_idx))
#
# all_noise_data = all_noise_data + data_idx
# logging.info("unique idx:{}".format(np.unique(all_noise_data)))
#
#
# ## save the wanted distribution
# if num_saved == self.components.K and i_iter>1:
#
# ### dimension = 2; save plot
# if self.D == 2:
# ## Plot results
# fig = plt.figure()
# ax = fig.add_subplot(111)
# plot_mixture_model(ax, self)
# plt.savefig(self.save_path + '/model.png')
# plt.savefig(self.save_path + '/model.pdf')
# for k in xrange(self.components.K):
# mu, sigma = self.components.map(k)
# plot_ellipse(ax, mu, sigma)
# # plt.show()
# plt.savefig(self.save_path + '/ellipse.png')
# plt.savefig(self.save_path + '/ellipse.pdf')
#
# ## get mean, sd and weights
# means = []
# sds = []
# for k in xrange(self.components.K):
# mu, sigma = self.components.map(k)
# means.append(mu)
# sds.append(sigma)
#
# if weight_first:
# ## label switching index
# weights = self.gibbs_weight()
# idx = np.argsort(weights)
#
# sds = np.array(sds).flatten()
# means = np.array(means).flatten()
# # weights = self.gibbs_weight()
#
# # label switching
# means = self.label_switch(idx, means)
# sds = self.label_switch(idx, sds)
# weights = self.label_switch(idx, weights)
# else:
# ## label switching index
# means = np.array(means).flatten()
# idx = np.argsort(means)
#
# sds = np.array(sds).flatten()
# weights = self.gibbs_weight()
#
# # label switching
# means = self.label_switch(idx, means)
# sds = self.label_switch(idx, sds)
# weights = self.label_switch(idx, weights)
#
# # back up for next iteration
# self.old_mean = means
# self.old_sigma = sds
#
# means = means.reshape((means.shape[0], 1))
# sds = sds.reshape((sds.shape[0],1))
# weights = weights.reshape((weights.shape[0],1))
# distribution_dict["mean"] = np.hstack((distribution_dict["mean"], means))
# distribution_dict["variance"] = np.hstack((distribution_dict["variance"], sds))
# distribution_dict["weights"] = np.hstack((distribution_dict["weights"], weights))
#
# dist_idx = dist_idx + 1
#
#
#
#
# if flag_constrain:
# if i_iter % n_constrain == 0:
# logging.info('performing constrain step')
# logging.info('all cluster nk: {}'.format(self.components.counts[:self.components.K]))
# isConstrained = True
# tmp_useful_cluster_num = []
# tmp_nonuseful_cluster_num = []
# for i_cluster in range(self.components.K):
# if self.components.counts[i_cluster] > constrain_thres:
# tmp_useful_cluster_num.append(i_cluster)
# else:
# tmp_nonuseful_cluster_num.append(i_cluster)
# else:
# isConstrained = False
# # print self.components.K
# # print self.components.counts
#
# if flag_loss and i_iter % n_loss_step == 0 and i_iter > loss_burnin:
# copy_components = copy.deepcopy(self.components)
# if flag_marg:
# max_prob = float('-inf')
# max_prob_components = copy_components
# else:
# min_loss = float('+inf')
# min_loss_components = copy_components
#
# loss_cnt=0
# while copy_components.K > 2:
# loss_cnt += 1
# if loss_cnt > 50:
# break
# # print "iter: {}".format(i_iter)
# # print "1: {}".format(copy_components.K)
#
# # because we need to assign the copy to max_components
# copy_components = copy.deepcopy(copy_components)
#
# loss_nonuseful_cluster_idx = np.argmin(copy_components.counts[:copy_components.K])
# loss_useful_cluster_num = []
#
# for i_cluster in range(copy_components.K):
# if i_cluster != loss_nonuseful_cluster_idx:
# loss_useful_cluster_num.append(i_cluster)
# # tmp_counts = copy_components.counts[:copy_components.K]
# # tmp_counts[loss_nonuseful_cluster_idx] = 0
# # print copy_components.counts[:copy_components.K]
# # print loss_useful_cluster_num
#
# for i in xrange(copy_components.N):
#
# # Cache some old values for possible future use
# k_old = copy_components.assignments[i]
# K_old = copy_components.K
# stats_old = copy_components.cache_component_stats(k_old)
#
# # Remove data vector `X[i]` from its current component
# copy_components.del_item(i)
#
# # Compute log probability of `X[i]` belonging to each component
# log_prob_z = np.zeros(copy_components.K + 1, np.float)
#
# log_prob_z[:copy_components.K] = np.log(copy_components.counts[:copy_components.K])
# # (25.33) in Murphy, p. 886
# log_prob_z[:copy_components.K] += copy_components.log_post_pred(i)
# # Add one component to which nothing has been assigned
# log_prob_z[-1] = math.log(self.alpha) + copy_components.cached_log_prior[i]
# prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))
#
# # Sample the new component assignment for `X[i]`
# k = utils.draw_rand(prob_z)
#
# loss_loop_data_cnt = 0
# if k_old in loss_useful_cluster_num:
# k = k_old
# else:
# while k not in loss_useful_cluster_num:
# loss_loop_data_cnt += 1
# if loss_loop_data_cnt >= 100:
# break
# # print '2: {}'.format(k)
# k = utils.draw_rand(prob_z)
#
# # Add data item X[i] into its component `k`
# if k == k_old and copy_components.K == K_old:
# # Assignment same and no components have been removed
# copy_components.restore_component_from_stats(k_old, *stats_old)
# copy_components.assignments[i] = k_old
# else:
# # Add data item X[i] into its new component `k`
# copy_components.add_item(i, k)
#
# ## TODO: move out
# if flag_marg:
# log_prob = self.log_marg_for_copy(copy_components)
# if log_prob > max_prob:
# max_prob = log_prob
# max_prob_components = copy_components
# else:
# loss_local = utils.cluster_loss_inertia(copy_components.X, copy_components.assignments)
# if loss_local < min_loss:
# min_loss = loss_local
# min_loss_components = copy_components
#
# if flag_adapcrp_form2 and i_iter > adapcrp_burnin: # for ada-pCRP
# adapcrp_thres = self.components.N * adapcrp_perct
# adapcrp_nk = self.components.counts[:self.components.K]
# small_perct = len(adapcrp_nk[np.where(adapcrp_nk <= adapcrp_thres)[0]]) * 1.0 / len(adapcrp_nk)
# adapcrp_power_form2 = 1.0 + (r_up - 1.0) * small_perct
# if i_iter % 20 == 0:
# logging.info('Ada-pCRP power: {}'.format(adapcrp_power_form2))
#
# ## parameter prepare for 'loss_adapcrp'
# if flag_loss_adapcrp and i_iter > lossadapcrp_burnin:
# this_loss = utils.cluster_loss_inertia(self.components.X, self.components.assignments)
# if this_loss < smallest_loss_adapcrp:
# r_lossadapcrp -= lossadapcrp_step
# smallest_loss_adapcrp = this_loss
# else:
# r_lossadapcrp += lossadapcrp_step
#
# if r_lossadapcrp < 1.:
# r_lossadapcrp = 1.
# if r_lossadapcrp > r_up_losspcrp:
# r_lossadapcrp = r_up_losspcrp
#
#
# if i_iter % 20 == 0:
# logging.info('smallest loss: {}'.format(smallest_loss_adapcrp))
# logging.info('loss: {}'.format(this_loss))
# logging.info('power: {}'.format(r_lossadapcrp))
#
# if flag_power and n_power>1:
# if i_iter % 20 == 0:
# print "permutate data"
# data_loop_list = np.random.permutation(xrange(self.components.N))
# else:
# data_loop_list = xrange(self.components.N)
# ## Loop over data items
# for i in data_loop_list:
#
# # Cache some old values for possible future use
# k_old = self.components.assignments[i]
# K_old = self.components.K
# stats_old = self.components.cache_component_stats(k_old)
#
# # Remove data vector `X[i]` from its current component
# self.components.del_item(i)
#
# # Compute log probability of `X[i]` belonging to each component
# log_prob_z = np.zeros(self.components.K + 1, np.float)
# if flag_power and i_iter > power_burnin:
# ## for pCRP
# log_prob_z[:self.components.K] = np.log(np.power(self.components.counts[:self.components.K],n_power))
# elif flag_adapcrp and i_iter > adapcrp_burnin:
# ## for ada-pCRP
# adapcrp_thres = self.components.N * adapcrp_perct
# adapcrp_nk = self.components.counts[:self.components.K]
# small_perct = len(adapcrp_nk[np.where(adapcrp_nk<=adapcrp_thres)[0]]) * 1.0 /len(adapcrp_nk)
# adapcrp_power = 1.0 + (r_up-1.0)*small_perct
# # logging.info('Ada-pCRP power: {}'.format(adapcrp_power))
# log_prob_z[:self.components.K] = np.log(
# np.power(self.components.counts[:self.components.K], adapcrp_power))
# elif flag_adapcrp_form2 and i_iter > adapcrp_burnin:
# ## for ada-pCRP form2
# log_prob_z[:self.components.K] = np.log(
# np.power(self.components.counts[:self.components.K], adapcrp_power_form2))
# elif flag_loss_adapcrp and i_iter > lossadapcrp_burnin:
# ## for loss-ada-pCRP
# log_prob_z[:self.components.K] = np.log(
# np.power(self.components.counts[:self.components.K], r_lossadapcrp))
# else:
# ## plain gibbs sampling
# # (25.35) in Murphy, p. 886
# log_prob_z[:self.components.K] = np.log(self.components.counts[:self.components.K])
# # (25.33) in Murphy, p. 886
# log_prob_z[:self.components.K] += self.components.log_post_pred(i)
# # Add one component to which nothing has been assigned
# log_prob_z[-1] = math.log(self.alpha) + self.components.cached_log_prior[i]
# prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))
#
# # Sample the new component assignment for `X[i]`
# k = utils.draw(prob_z)
# # logger.debug("Sampled k = " + str(k) + " from " + str(prob_z) + ".")
#
# if flag_constrain:
# if isConstrained:
# # logging.info('performing constrained reassign')
# if k_old in tmp_nonuseful_cluster_num:
# k = utils.draw(prob_z)
# while k not in tmp_useful_cluster_num:
# k = utils.draw(prob_z)
# else:
# k = k_old
#
# # Add data item X[i] into its component `k`
# if k == k_old and self.components.K == K_old:
# # Assignment same and no components have been removed
# self.components.restore_component_from_stats(k_old, *stats_old)
# self.components.assignments[i] = k_old
# else:
# # Add data item X[i] into its new component `k`
# self.components.add_item(i, k)
# ## end loop data
#
# ## noise proof
# isNoiseProof = False
# if isNoiseProof:
# noise_useful_cluster_num = []
# noise_nonuseful_cluster_num = []
# for i_cluster in range(self.components.K):
# if self.components.counts[i_cluster] == 1:
# noise_nonuseful_cluster_num.append(i_cluster)
# else:
# noise_useful_cluster_num.append(i_cluster)
#
# # logging.info('clusters:{}'.format(self.components.counts[:self.components.K]))
# small_cluster_idx = np.where(self.components.counts[:self.components.K] == 1)[0]
#
#
# small_data_idx = [i for i, row in enumerate(self.components.assignments) if row in small_cluster_idx]
#
# ## Loop over data items
# for i in small_data_idx:
#
# # Cache some old values for possible future use
# k_old = self.components.assignments[i]
# K_old = self.components.K
# stats_old = self.components.cache_component_stats(k_old)
#
# # Remove data vector `X[i]` from its current component
# self.components.del_item(i)
#
# # Compute log probability of `X[i]` belonging to each component
# log_prob_z = np.zeros(self.components.K + 1, np.float)
#
# ## plain gibbs sampling
# # (25.35) in Murphy, p. 886
# log_prob_z[:self.components.K] = np.log(self.components.counts[:self.components.K])
# # (25.33) in Murphy, p. 886
# log_prob_z[:self.components.K] += self.components.log_post_pred(i)
# # Add one component to which nothing has been assigned
# log_prob_z[-1] = math.log(self.alpha) + self.components.cached_log_prior[i]
# prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))
#
# k = utils.draw(prob_z)
# while k in small_cluster_idx:
# k = utils.draw(prob_z)
#
#
# # Add data item X[i] into its component `k`
# if k == k_old and self.components.K == K_old:
# # Assignment same and no components have been removed
# self.components.restore_component_from_stats(k_old, *stats_old)
# self.components.assignments[i] = k_old
# else:
# # Add data item X[i] into its new component `k`
# self.components.add_item(i, k)
# ## end noise proof
#
#
# if flag_approx and i_iter > approx_burnin:
# approx_thres = self.components.N * approx_thres_perct
# if i_iter % 20 == 0:
# logging.info('performing approx step')
# logging.info('all cluster nk: {}'.format(self.components.counts[:self.components.K]))
# approx_useful_cluster_num = []
# approx_nonuseful_cluster_num = []
# for i_cluster in range(self.components.K):
# if self.components.counts[i_cluster] > approx_thres:
# approx_useful_cluster_num.append(i_cluster)
# else:
# approx_nonuseful_cluster_num.append(i_cluster)
#
# # Loop over data items
# for i in xrange(self.components.N):
#
# # Cache some old values for possible future use
# k_old = self.components.assignments[i]
# K_old = self.components.K
# stats_old = self.components.cache_component_stats(k_old)
#
# # Remove data vector `X[i]` from its current component
# self.components.del_item(i)
#
# # Compute log probability of `X[i]` belonging to each component
# log_prob_z = np.zeros(self.components.K + 1, np.float)
# # (25.35) in Murphy, p. 886
# log_prob_z[:self.components.K] = np.log(self.components.counts[:self.components.K])
# # (25.33) in Murphy, p. 886
# log_prob_z[:self.components.K] += self.components.log_post_pred(i)
# # Add one component to which nothing has been assigned
# log_prob_z[-1] = math.log(self.alpha) + self.components.cached_log_prior[i]
# prob_z = np.exp(log_prob_z - logsumexp(log_prob_z))
#
# # Sample the new component assignment for `X[i]`
# k = utils.draw(prob_z)
#
# ## approx sampling step
# if k_old in approx_nonuseful_cluster_num:
# k = utils.draw(prob_z)
# while k not in approx_useful_cluster_num:
# k = utils.draw(prob_z)
# else:
# k = k_old
#
# # Add data item X[i] into its component `k`
# if k == k_old and self.components.K == K_old:
# # Assignment same and no components have been removed
# self.components.restore_component_from_stats(k_old, *stats_old)
# self.components.assignments[i] = k_old
# else:
# # Add data item X[i] into its new component `k`
# self.components.add_item(i, k)
#
#
#
# if flag_loss and i_iter % n_loss_step == 0 and i_iter>loss_burnin:
# if flag_marg:
# self.components = max_prob_components
# else:
# self.components = min_loss_components
#
# # Update record
# record_dict = self.update_record_dict(record_dict, i_iter, true_assignments, start_time)
# start_time = time.time()
#
# # record_dict["sample_time"].append(time.time() - start_time)
# # start_time = time.time()
# # record_dict["log_marg"].append(self.log_marg())
# # record_dict["components"].append(self.components.K)
# # nmi = normalized_mutual_info_score(_true_assignment, self.components.assignments)
# # record_dict["nmi"].append(nmi)
# # mi = mutual_info_score(_true_assignment, self.components.assignments)
# # record_dict["mi"].append(mi)
# # record_dict["nk"].append(self.components.counts[:self.components.K])
# # loss = utils.cluster_loss_inertia(self.components.X, self.components.assignments)
# # record_dict["loss"].append(loss)
# #
# # bic = utils.cluster_loss_inertia(self.components.X, self.components.assignments)
# # record_dict["bic"].append(bic)
# #
# # vi = information_variation(_true_assignment, self.components.assignments)
# # record_dict["vi"].append(vi)
# # Log info
# # if i_iter % 20 ==0:
# # info = "iteration: " + str(i_iter)
# # for key in sorted(record_dict):
# # info += ", " + key + ": " + str(record_dict[key][-1])
# # # info += ", nmi: " + str(nmi)
# # info += "."
# # logger.info(info)
#
# return record_dict, distribution_dict
| StarcoderdataPython |
6638170 | import dwavebinarycsp
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
sampler = EmbeddingComposite(DWaveSampler())
def scheduling(time, location, length, mandatory):
    """Constraint predicate for the meeting-scheduling CSP.

    Truthy ``time`` means business hours, truthy ``location`` means the
    office.  A meeting is feasible only when it is mandatory and is held at
    the office during business hours, or away from the office otherwise.
    ``length`` is accepted so the constraint covers all four CSP variables,
    but it does not affect feasibility.
    """
    in_right_place = location if time else not location
    return in_right_place and mandatory
# Build a binary CSP and register the 4-variable scheduling constraint.
csp = dwavebinarycsp.ConstraintSatisfactionProblem(dwavebinarycsp.BINARY)
csp.add_constraint(scheduling, ['time', 'location', 'length', 'mandatory'])
# Convert the CSP into a binary quadratic model the sampler can minimize.
bqm = dwavebinarycsp.stitch(csp)
print(bqm.linear)
print(bqm.quadratic)
response = sampler.sample(bqm, num_reads = 5000)
# Energy of the first yielded record -- presumably response.data() yields
# records in ascending-energy order, making this the minimum; TODO confirm
# against the dimod SampleSet documentation.
min_energy = next(response.data(['energy']))[0]
total = 0
for sample, energy, occurences in response.data(['sample', 'energy', 'num_occurrences']):
    total += occurences  # running total of reads (accumulated but never printed)
    if energy == min_energy:
        # Decode each binary variable into its human-readable meaning.
        time = 'business hours' if sample['time'] else 'evenings'
        location = 'office' if sample['location'] else 'home'
        length = 'short' if sample['length'] else 'long'
        mandatory = 'mandatory' if sample['mandatory'] else 'optional'
        print(f"{occurences}: During {time} at {location}, you can schedule a {length} meeting that is {mandatory}.\n")
| StarcoderdataPython |
11296446 | <filename>djangocms_blog/fields.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.utils.text import slugify as django_slugify
__all__ = ['slugify']
def slugify(base):
    """Slugify ``base`` while preserving Unicode characters.

    Thin wrapper around Django's ``slugify`` that always enables
    ``allow_unicode`` so non-ASCII text keeps its characters.
    """
    slug = django_slugify(base, allow_unicode=True)
    return slug
| StarcoderdataPython |
8107092 | from newt import (
Queue
)
def test_basic():
    """A freshly built Queue exposes async and sync facades that agree on
    capacity and both start empty."""
    wrapped = Queue(2)
    for facade in (wrapped.async_queue, wrapped.sync_queue):
        assert facade.maxsize == 2
        assert facade.empty()
# @pytest.mark.asyncio
# async def test_close_with_pending():
# q = Queue()
# await q.async_queue.put(1)
# q.close()
# await q.wait_closed()
| StarcoderdataPython |
1759242 | <gh_stars>1-10
##################################################
# Import Own Assets
##################################################
from hyperparameter_hunter.library_helpers.keras_optimization_helper import (
clean_parenthesized_string,
consolidate_layers,
find_space_fragments,
merge_compile_params,
rewrite_model_builder,
)
from hyperparameter_hunter import Real, Categorical
##################################################
# Import Miscellaneous Assets
##################################################
from collections import OrderedDict
import pytest
##################################################
# `consolidate_layers` Scenarios
##################################################
#################### Parametrization Helper Dicts ####################
# Default constructor kwargs for ``keras.layers.Dense`` -- used as the
# "__hh_default_kwargs" baseline that `consolidate_layers` merges with the
# kwargs actually passed by the user.
default_dense = {
    "activation": None,
    "use_bias": True,
    "kernel_initializer": "glorot_uniform",
    "bias_initializer": "zeros",
    "kernel_regularizer": None,
    "bias_regularizer": None,
    "activity_regularizer": None,
    "kernel_constraint": None,
    "bias_constraint": None,
}
# Default constructor kwargs for ``keras.layers.Dropout``.
default_dropout = {"noise_shape": None, "seed": None}
# Raw per-layer records for a simple 3-layer MLP (Dense -> Dropout -> Dense).
# Each record separates the layer's declared defaults ("__hh_default_*") from
# the args/kwargs actually supplied ("__hh_used_*").
simple_mlp_layers = [
    {
        "class_name": "Dense",
        "__hh_default_args": ["units"],
        "__hh_default_kwargs": default_dense,
        "__hh_used_args": [100],
        "__hh_used_kwargs": dict(kernel_initializer="uniform", input_shape=[30], activation="relu"),
    },
    {
        "class_name": "Dropout",
        "__hh_default_args": ["rate"],
        "__hh_default_kwargs": default_dropout,
        "__hh_used_args": [0.5],
        "__hh_used_kwargs": {},
    },
    {
        "class_name": "Dense",
        "__hh_default_args": ["units"],
        "__hh_default_kwargs": default_dense,
        "__hh_used_args": [],
        "__hh_used_kwargs": dict(units=1, kernel_initializer="uniform", activation="sigmoid"),
    },
]
# Fully merged kwargs for the two Dense layers (defaults overridden by usage).
dense_0_kwargs = dict(default_dense, **dict(activation="relu", kernel_initializer="uniform"))
dense_1_kwargs = dict(default_dense, **dict(activation="sigmoid", kernel_initializer="uniform"))
#################### Expected Layers ####################
# Expected `consolidate_layers` output for each combination of its
# `class_name_key` / `split_args` flags (exercised by the parametrized test).
expected_layers_both_true = [
    dict(class_name="Dense", arg_vals={"units": 100}, kwarg_vals=dense_0_kwargs),
    dict(class_name="Dropout", arg_vals={"rate": 0.5}, kwarg_vals=default_dropout),
    dict(class_name="Dense", arg_vals={"units": 1}, kwarg_vals=dense_1_kwargs),
]
expected_layers_class_name_key_false = [
    {"Dense": dict(arg_vals={"units": 100}, kwarg_vals=dense_0_kwargs)},
    {"Dropout": dict(arg_vals={"rate": 0.5}, kwarg_vals=default_dropout)},
    {"Dense": dict(arg_vals={"units": 1}, kwarg_vals=dense_1_kwargs)},
]
expected_layers_split_args_false = [
    dict(**dict(class_name="Dense", units=100), **dense_0_kwargs),
    dict(**dict(class_name="Dropout", rate=0.5), **default_dropout),
    dict(**dict(class_name="Dense", units=1), **dense_1_kwargs),
]
expected_layers_both_false = [
    {"Dense": dict(**dict(units=100), **dense_0_kwargs)},
    {"Dropout": dict(**dict(rate=0.5), **default_dropout)},
    {"Dense": dict(**dict(units=1), **dense_1_kwargs)},
]
#################### Test `consolidate_layers` Equality ####################
@pytest.mark.parametrize(
    ["expected", "class_name_key", "split_args"],
    [
        pytest.param(expected_layers_both_true, True, True, id="both=true"),
        pytest.param(expected_layers_class_name_key_false, False, True, id="class_name_key=False"),
        pytest.param(expected_layers_split_args_false, True, False, id="split_args=False"),
        pytest.param(expected_layers_both_false, False, False, id="both=false"),
    ],
)
def test_consolidate_layers(expected, class_name_key, split_args):
    """`consolidate_layers` merges the raw MLP layer records into the shape
    selected by the `class_name_key` / `split_args` flags."""
    actual = consolidate_layers(simple_mlp_layers, class_name_key, split_args)
    assert actual == expected
##################################################
# `merge_compile_params` Scenarios
##################################################
# One fully concrete set of Keras compile parameters, crossed with two
# dummified-parameter cases: one replacing `optimizer` itself, one replacing
# the nested `optimizer_params.lr` value.
@pytest.mark.parametrize(
    "compile_params",
    [
        {
            "compile_kwargs": {},
            "loss_function_names": ["binary_crossentropy"],
            "loss_functions": ["<binary_crossentropy function>"],
            "loss_weights": None,
            "metrics": ["accuracy"],
            "metrics_names": ["loss", "acc"],
            "optimizer": "Adam",
            "optimizer_params": dict(
                amsgrad=False, beta_1=0.9, beta_2=0.999, decay=0.0, epsilon=1e-07, lr=0.001
            ),
            "sample_weight_mode": None,
            "target_tensors": None,
            "weighted_metrics": None,
        }
    ],
)
@pytest.mark.parametrize(
    ["dummified_params", "expected"],
    [
        (
            {("params", "optimizer"): Categorical(categories=("adam", "rmsprop"))},
            {
                "compile_kwargs": {},
                "loss_function_names": ["binary_crossentropy"],
                "loss_functions": ["<binary_crossentropy function>"],
                "loss_weights": None,
                "metrics": ["accuracy"],
                "metrics_names": ["loss", "acc"],
                "optimizer": Categorical(categories=("adam", "rmsprop")),
                "optimizer_params": dict(
                    amsgrad=False, beta_1=0.9, beta_2=0.999, decay=0.0, epsilon=1e-07, lr=0.001
                ),
                "sample_weight_mode": None,
                "target_tensors": None,
                "weighted_metrics": None,
            },
        ),
        (
            {("params", "optimizer_params", "lr"): Real(0.0001, 0.1)},
            {
                "compile_kwargs": {},
                "loss_function_names": ["binary_crossentropy"],
                "loss_functions": ["<binary_crossentropy function>"],
                "loss_weights": None,
                "metrics": ["accuracy"],
                "metrics_names": ["loss", "acc"],
                "optimizer": "Adam",
                "optimizer_params": dict(
                    amsgrad=False,
                    beta_1=0.9,
                    beta_2=0.999,
                    decay=0.0,
                    epsilon=1e-07,
                    lr=Real(0.0001, 0.1),
                ),
                "sample_weight_mode": None,
                "target_tensors": None,
                "weighted_metrics": None,
            },
        ),
    ],
)
def test_merge_compile_params(compile_params, dummified_params, expected):
    """Dummified space entries are re-inserted at their original key paths."""
    assert merge_compile_params(compile_params, dummified_params) == expected
##################################################
# `clean_parenthesized_string` Scenarios
##################################################
@pytest.mark.parametrize(
    "expected",
    ["Categorical([Dropout(0.5), Activation('linear')])", "Dense(Integer(256, 1024))"],
    # Notice, the beginning remains un-trimmed, despite not starting with `space` class
)
def test_clean_parenthesized_string(expected):
    """Text trailing the balanced-parenthesis span is stripped off."""
    noisy = expected + " ... I am some extra text"
    assert clean_parenthesized_string(noisy) == expected
# Both inputs have unbalanced parentheses (one closer missing), so cleaning
# cannot find a well-formed span.
@pytest.mark.parametrize(
    "string", ["Categorical([Dropout(0.5), Activation('linear')]", "Dense(Integer(256, (1024))"]
)
def test_clean_parenthesized_string_value_error(string):
    """Unbalanced parentheses must raise ValueError."""
    with pytest.raises(ValueError):
        clean_parenthesized_string(string)
##################################################
# Build Function/Space Fragment-Finding Objects
##################################################
# The objects defined in this section may be used in a number of tests of different functionality
# Hyperparameter-space fragments (name -> source text) expected to be
# extracted from `_build_fn_source_0` below, keyed by the argument or layer
# they parameterize.
_expected_params_0 = OrderedDict(
    [
        ("rate", "Real(0.0, 1.0)"),
        ("units", "Integer(256, 1024)"),
        ("kernel_initializer", "Categorical(['glorot_uniform', 'lecun_normal'])"),
        ("Activation", "Categorical(['relu', 'sigmoid'], transform='onehot')"),
        ("Dropout", "Real(low=0.0, high=1.0)"),
        ("loss", "Categorical(['categorical_crossentropy', 'binary_crossentropy'])"),
        ("optimizer", "Categorical(['rmsprop', 'adam', 'sgd'], transform='onehot')"),
    ]
)
# Same idea for `_build_fn_source_1`; repeated layer names carry numeric
# suffixes ("Dropout_1", "Dropout_2") to keep the keys unique.
_expected_params_1 = OrderedDict(
    [
        ("Dropout", "Real(0.0, 1.0)"),
        ("Dense", "Integer(256, 1024)"),
        ("Activation", "Categorical(['relu', 'sigmoid'], transform='onehot')"),
        ("Dropout_1", "Real(low=0.0, high=1.0)"),
        ("Dropout_2", "Categorical(categories=['three', 'four'])"),
        # FLAG: Above is weird, but probably ok, maybe
        ("add", "Categorical([Dropout(0.5), Activation('linear')])"),
        # TODO: Above layer selection not fully supported
        ("loss", "Categorical(['categorical_crossentropy', 'binary_crossentropy'])"),
        ("optimizer", "Categorical(['rmsprop', 'adam', 'sgd'], transform='identity')"),
    ]
)
def _build_fn_source_0(stage):
    """Return Keras model-builder source used as a test fixture.

    stage="original": hyperparameter-space declarations are inlined literally.
    stage="reusable": spaces become `params[...]` lookups and the signature is
    rewritten to the reusable `build_fn(...)` form.
    Implicitly returns None for any other `stage` value.
    """
    # Pair each expected space with its `params[...]` replacement, in order.
    stage_vals = [(_v, "params[{!r}]".format(_k)) for _k, _v in _expected_params_0.items()]
    src = """def {7}:
    model = Sequential()
    model.add(Dense(512, input_shape=input_shape))
    model.add(Activation('relu'))
    model.add(Dropout(rate={0}, seed=32))
    model.add(Dense(units={1}, kernel_initializer={2}))
    model.add(Activation({3}))
    model.add(Dropout({4}))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(
        loss={5},
        metrics=['accuracy'],
        optimizer={6}
    )
    return model"""
    if stage == "original":
        return src.format(*[_[0] for _ in stage_vals], "create_model(input_shape)")
    elif stage == "reusable":
        return src.format(*[_[1] for _ in stage_vals], "build_fn(input_shape=(10, ), params=None)")
def _build_fn_source_1(stage):
    """Like `_build_fn_source_0`, but the generated builder carries its own
    docstring, a conditional layer block, and an `add`-selected layer space.

    stage="original" inlines the space declarations; stage="reusable" swaps in
    `params[...]` lookups. Implicitly returns None for any other stage.
    """
    stage_vals = [(_v, "params[{!r}]".format(_k)) for _k, _v in _expected_params_1.items()]
    src = '''def {8}:
    """Keras model-building function that contains hyperparameter space declarations
    Parameters
    ----------
    input_shape: Int
        The shape of the input provided to the first layer of the model
    Returns
    -------
    model: Instance of :class:`keras.Sequential`
        A compiled Keras model"""
    model = Sequential()
    model.add(Dense(512, input_shape=(784,)))
    model.add(Activation('relu'))
    model.add(Dropout({0}))
    model.add(Dense({1}))
    model.add(Activation({2}))
    model.add(Dropout({3}))
    if {4} == 'four':
        model.add(Dense(100))
    model.add({5})
    model.add(Activation('relu'))
    model.add(Dense(10))
    model.add(Activation('softmax'))
    model.compile(
        loss={6},
        metrics=['accuracy'],
        optimizer={7}
    )
    return model'''
    if stage == "original":
        return src.format(*[_[0] for _ in stage_vals], "create_model(input_shape)")
    elif stage == "reusable":
        return src.format(*[_[1] for _ in stage_vals], "build_fn(input_shape=(10, ), params=None)")
##################################################
# `rewrite_model_builder` Scenarios
##################################################
@pytest.mark.parametrize(
    ["src_str", "expected_src_str", "expected_params"],
    [
        [_build_fn_source_0("original"), _build_fn_source_0("reusable"), _expected_params_0],
        [_build_fn_source_1("original"), _build_fn_source_1("reusable"), _expected_params_1],
    ],
    ids=["0", "1"],
)
def test_rewrite_model_builder(src_str, expected_src_str, expected_params):
    """`rewrite_model_builder` should return the params-driven source string
    plus the extracted (name -> space source) mapping."""
    assert rewrite_model_builder(src_str) == (expected_src_str, expected_params)
##################################################
# `find_space_fragments` Scenarios
##################################################
@pytest.mark.parametrize(
    ["string", "expected_choices", "expected_names", "expected_indexes"],
    [
        (
            _build_fn_source_0("original"),
            list(_expected_params_0.values()),
            list(_expected_params_0.keys()),
            # Presumably character offsets of each fragment within `string`
            # -- TODO confirm against find_space_fragments' contract.
            [168, 220, 259, 334, 411, 533, 647],
        ),
        (
            _build_fn_source_1("original"),
            list(_expected_params_1.values()),
            list(_expected_params_1.keys()),
            [474, 511, 557, 634, 668, 769, 954, 1070],
        ),
    ],
    ids=["0", "1"],
)
def test_find_space_fragments(string, expected_choices, expected_names, expected_indexes):
    """`find_space_fragments` should locate every space declaration, its
    assigned name, and its position inside the source string."""
    assert find_space_fragments(string) == (expected_choices, expected_names, expected_indexes)
| StarcoderdataPython |
3497263 | <reponame>Tanc009/jdcloud-sdk-python
# coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class OrderInfo(object):
    # Plain value object describing a JD Cloud resource-package order;
    # auto-generated API model, all fields optional.
    def __init__(self, pin=None, packageType=None, validity=None, specs=None, quantity=None, totalFee=None, payTime=None):
        """
        :param pin: (Optional) user account name (pin)
        :param packageType: (Optional) resource package type
        :param validity: (Optional) validity duration
        :param specs: (Optional) specification
        :param quantity: (Optional) number of resource packages
        :param totalFee: (Optional) total price
        :param payTime: (Optional) payment time
        """
        self.pin = pin
        self.packageType = packageType
        self.validity = validity
        self.specs = specs
        self.quantity = quantity
        self.totalFee = totalFee
        self.payTime = payTime
| StarcoderdataPython |
6581851 | <reponame>tomhodgins/xpath-helpers
## LISP-style XPath Helpers
## Nodes
def rootNode(continuation=''):
    """Absolute path from the document root ('/')."""
    marker = '/'
    return marker + continuation
def anyNode(continuation=''):
    """Anywhere-in-document shorthand ('//')."""
    marker = '//'
    return marker + continuation
def currentNode(continuation=''):
    """Step to the current node ('/.')."""
    marker = '/.'
    return marker + continuation
def parentNode(continuation=''):
    """Step to the parent node ('/..')."""
    marker = '/..'
    return marker + continuation
## Tags
def tag(string1, continuation=''):
    """A bare tag-name step."""
    return ''.join([string1, continuation])
def has(string1, continuation=''):
    """Wrap an expression in a predicate: [expr]."""
    return ''.join(['[', string1, ']', continuation])
def isNot(string1, continuation=''):
    """Negate an expression: not(expr)."""
    return ''.join(['not(', string1, ')', continuation])
def tagStartsWith(string1, continuation=''):
    """Predicate selecting elements whose tag name starts with the prefix."""
    return ''.join(['[starts-with(name(), "', string1, '")]', continuation])
## Attributes
def attrExists(string1, continuation=''):
    """Predicate: element carries the attribute."""
    return ''.join(['[@', string1, ']', continuation])
def attrEquals(string1, string2, continuation=''):
    """Predicate: attribute equals the given value."""
    return ''.join(['[@', string1, '="', string2, '"]', continuation])
def attrContains(string1, string2, continuation=''):
    """Predicate: attribute value contains the given substring."""
    return ''.join(['[contains(@', string1, ', "', string2, '")]', continuation])
def _attrCompare(name, op, value, continuation):
    # Shared builder for the four numeric-comparison predicates below.
    return '[@' + name + ' ' + op + ' ' + value + ']' + continuation
def attrGreater(string1, string2, continuation=''):
    """Predicate: attribute > value."""
    return _attrCompare(string1, '>', string2, continuation)
def attrGreaterEquals(string1, string2, continuation=''):
    """Predicate: attribute >= value."""
    return _attrCompare(string1, '>=', string2, continuation)
def attrLesser(string1, string2, continuation=''):
    """Predicate: attribute < value."""
    return _attrCompare(string1, '<', string2, continuation)
def attrLesserEquals(string1, string2, continuation=''):
    """Predicate: attribute <= value."""
    return _attrCompare(string1, '<=', string2, continuation)
def attrStartsWith(string1, continuation=''):
    """Predicate: any attribute whose *name* starts with the prefix."""
    return ''.join(['[@*[starts-with(name(), "', string1, '")]]', continuation])
## Text Functions
def containsText(string1, continuation=''):
    """Predicate matching elements whose text() contains the given string."""
    return ''.join(['[contains(text(), "', string1, '")]', continuation])
## AXES
## Documents
def preceding(continuation=''):
    """'preceding::' axis -- all nodes before the context node in document order."""
    axis = 'preceding::'
    return axis + continuation
def following(continuation=''):
    """'following::' axis -- all nodes after the context node in document order."""
    axis = 'following::'
    return axis + continuation
## Parents
def parent(continuation=''):
    """'parent::' axis -- the immediate parent."""
    axis = 'parent::'
    return axis + continuation
def ancestor(continuation=''):
    """'ancestor::' axis -- every enclosing node up to the root."""
    axis = 'ancestor::'
    return axis + continuation
## Self
def self(continuation=''):
    """'self::' axis -- the context node itself."""
    axis = 'self::'
    return axis + continuation
def ancestorOrSelf(continuation=''):
    """'ancestor-or-self::' axis."""
    axis = 'ancestor-or-self::'
    return axis + continuation
def descendantOrSelf(continuation=''):
    """'descendant-or-self::' axis."""
    axis = 'descendant-or-self::'
    return axis + continuation
## Siblings
def precedingSibling(continuation=''):
    """'preceding-sibling::' axis -- earlier siblings of the context node.

    BUG FIX: the original module defined this function twice with identical
    bodies (the second silently shadowed the first); the redundant duplicate
    has been removed.
    """
    return 'preceding-sibling::' + continuation
def followingSibling(continuation=''):
    """'following-sibling::' axis -- later siblings of the context node."""
    return 'following-sibling::' + continuation
## Children
def child(continuation=''):
    """'child::' axis -- immediate children of the context node."""
    axis = 'child::'
    return axis + continuation
def descendant(continuation=''):
return 'descendant::' + continuation | StarcoderdataPython |
11235168 | <reponame>Diffeomorphic/import-daz
import cv2
import os
import argparse
def main():
    """Downscale an image by a power-of-two factor using OpenCV.

    Usage: script FILE STEPS [--overwrite]
    Each step halves width and height (clamped to a 4-pixel minimum).
    Without --overwrite the result is written next to the input as
    '<name>-res<N><ext>'; with it the input file is replaced.
    """
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("file", type=str, help="Name of input file.")
    parser.add_argument("steps", type=int, help="Number of steps")
    parser.add_argument("--overwrite", "-o", dest="overwrite", action="store_true")
    args = parser.parse_args()
    if args.steps == 0:
        return
    if args.steps < 0 or args.steps > 8:
        print("Steps must be an integer between 1 and 8")
        return
    factor = 0.5 ** args.steps
    if args.overwrite:
        newfile = args.file
    else:
        fname, ext = os.path.splitext(args.file)
        # If the input already carries a '-res<digit>' suffix, rescale the
        # original (un-suffixed) file rather than the downscaled copy.
        if fname[-5:-1] == "-res" and fname[-1].isdigit():
            fname = fname[:-5]
            args.file = fname + ext
        newfile = "%s-res%d%s" % (fname, args.steps, ext)
    if not os.path.isfile(args.file):
        print("The file %s does not exist" % args.file)
        return
    if os.path.isfile(newfile) and not args.overwrite:
        print("%s already exists" % os.path.basename(newfile))
        return
    img = cv2.imread(args.file, cv2.IMREAD_UNCHANGED)
    rows, cols = img.shape[0:2]
    newrows = max(4, int(factor * rows))
    newcols = max(4, int(factor * cols))
    newimg = cv2.resize(img, (newcols, newrows), interpolation=cv2.INTER_AREA)
    print("%s: (%d, %d) => (%d %d)" % (os.path.basename(newfile), rows, cols, newrows, newcols))
    # BUG FIX: the original wrote to os.path.join(args.file, newfile), which
    # treats the input *file* path as a directory and yields an invalid
    # destination, so cv2.imwrite silently failed. `newfile` is already the
    # full output path.
    cv2.imwrite(newfile, newimg)
main()
| StarcoderdataPython |
1989055 | <reponame>sweetpalma/clrs
#!/usr/bin/env python3
# Part of CLRS solutions by SweetPalma, 2019. See LICENSE for details.
import unittest
import math
# Solution:
def modified_merge_sort(arr, p, r):
    """Sort arr[p..r] in place and return the number of inversions in it.

    Classic divide-and-conquer: inversions = inversions(left half) +
    inversions(right half) + split inversions counted during the merge.
    """
    if p >= r:
        return 0
    q = math.floor((p + r) / 2)
    left_count = modified_merge_sort(arr, p, q)
    right_count = modified_merge_sort(arr, q + 1, r)
    return left_count + right_count + modified_merge(arr, p, q, r)
# Solution:
def modified_merge(arr, p, q, r):
    """Merge the sorted runs arr[p..q] and arr[q+1..r] in place.

    Returns the number of *split* inversions: pairs (x, y) with x in the
    left run, y in the right run, and x > y. Whenever a right element is
    emitted before the left run is exhausted, every remaining left element
    forms an inversion with it.
    """
    left = arr[p:q + 1]
    right = arr[q + 1:r + 1]
    crossings = 0
    i = j = 0
    for k in range(p, r + 1):
        # Take from the left when the right run is drained, or when both
        # runs have elements and the left one is <= (stable merge).
        take_left = j >= len(right) or (i < len(left) and left[i] <= right[j])
        if take_left:
            arr[k] = left[i]
            i += 1
        else:
            crossings += len(left) - i
            arr[k] = right[j]
            j += 1
    return crossings
# Test:
class TestInsertions(unittest.TestCase):
    """Checks inversion counting on small sample arrays.

    [2, 3, 8, 6, 1] has 5 inversions; [2, 4, 1, 3, 5] has 3.
    """
    def test_insertions(self):
        self.assertEqual(5, modified_merge_sort([2, 3, 8, 6, 1], 0, 4))
        self.assertEqual(3, modified_merge_sort([2, 4, 1, 3, 5], 0, 4))
# Runner:
if __name__ == '__main__':
    # Verbose run; exit=False keeps the interpreter alive (handy in a REPL).
    unittest.main(argv=['first-arg-is-ignored', '-v'], exit=False)
| StarcoderdataPython |
5034765 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Sequence, Union
import torch
import torch.nn as nn
from .grad_sample_module import GradSampleModule
def register_grad_sampler(target_class_or_classes: Union[type, Sequence[type]]):
    """
    Registers the decorated function as the ``grad_sampler`` of
    ``target_class_or_classes`` -- the function invoked whenever a per-sample
    gradient of those module classes is computed. Every grad_sampler shares
    the same signature:
    >>> @register_grad_sampler(nn.MyCustomClass)
    >>> def compute_grad_sample(module, activations, backprops):
    >>>     pass
    See the existing grad_samplers under ``opacus.grad_sample.`` for examples.
    """
    def decorator(f):
        # Accept either a single class or a sequence of classes.
        if isinstance(target_class_or_classes, Sequence):
            targets = target_class_or_classes
        else:
            targets = [target_class_or_classes]
        for target_class in targets:
            GradSampleModule.GRAD_SAMPLERS[target_class] = f
        return f
    return decorator
def create_or_extend_grad_sample(
    param: torch.Tensor, grad_sample: torch.Tensor, batch_dim: int
) -> None:
    """
    Attach ``grad_sample`` to ``param``, concatenating along the batch
    dimension if per-sample gradients were already accumulated.

    Args:
        param: Parameter that receives the ``grad_sample`` attribute
        grad_sample: Per-sample gradients; same shape as ``param`` plus a
            leading batch dimension
        batch_dim: Index of the batch dimension within ``grad_sample``
    """
    if not hasattr(param, "grad_sample"):
        param.grad_sample = grad_sample
        return
    param.grad_sample = torch.cat((param.grad_sample, grad_sample), dim=batch_dim)
def create_or_accumulate_grad_sample(
    param: torch.Tensor, grad_sample: torch.Tensor, layer: nn.Module
) -> None:
    """
    Accumulate ``grad_sample`` into ``param.grad_sample``, allocating a
    zero-filled buffer of ``layer.max_batch_len`` rows on first use so later
    (possibly smaller) batches can be added in place.

    Args:
        param: Parameter that receives the ``grad_sample`` attribute
        grad_sample: Per-sample gradients; same shape as ``param`` plus a
            leading batch dimension
        layer: Module carrying ``max_batch_len`` (only that attribute is read)
    """
    n = grad_sample.shape[0]
    if hasattr(param, "grad_sample"):
        param.grad_sample[:n] += grad_sample
    else:
        buffer_shape = torch.Size([layer.max_batch_len]) + grad_sample.shape[1:]
        # new_zeros keeps grad_sample's device and dtype.
        param.grad_sample = grad_sample.new_zeros(buffer_shape)
        param.grad_sample[:n] = grad_sample
| StarcoderdataPython |
6694459 | <gh_stars>10-100
from typing import Dict
import numpy as np
from actor.correspondence_actor import CorrespondenceActor
from shape.point_2d import Point2D
from shape.polyline_2d import Polyline2D
from shape.shape import Shape
class Correspondence(object):
    """A 2D annotated shape paired with its HD-map counterpart.

    Holds the annotated image shape (a point or a two-vertex polyline), the
    shape reprojected from the HD map, a timestamp, an on-road flag, and an
    actor used for visualization. The id indexes this correspondence inside
    calibration_optimizer.
    """
    def __init__(self, shape: Shape = None,
                 reprojected_shape: Shape = None,
                 correspondence_id: int = 0):
        self._shape = shape
        self._reprojected_shape = reprojected_shape
        self._timestamp = int()
        self._is_on_road = True
        self._actor = CorrespondenceActor()
        # This id is used in calibration_optimizer to index the
        # according correspondences.
        self._id = correspondence_id
    def from_json_dict(self, json_dict: Dict):
        """Populate this correspondence from its JSON dict form.

        Segment entries use 'annotation_segment'/'hd_map_segment' (Polyline2D);
        otherwise 'annotation_point'/'hd_map_point' (Point2D). The reprojected
        shape starts as zeros of the annotation's shape and stores the HD-map
        3D vertices as its origin vertices.
        """
        # FIXME: fix the point correspondence loading bug.
        if "annotation_segment" in json_dict:
            shape_coords = json_dict['annotation_segment']
            vector_coords = json_dict['hd_map_segment']
            reprojected_coords = np.zeros(np.array(shape_coords).shape)
            self._shape = Polyline2D(
                np.array(shape_coords).reshape(-1, 2))
            self._reprojected_shape = Polyline2D(
                np.array(reprojected_coords).reshape((-1, 2)))
            self._reprojected_shape.set_origin_vertices(
                np.array(vector_coords).reshape((-1, 3)))
        else:
            shape_coords = json_dict['annotation_point']
            vector_coords = json_dict['hd_map_point']
            reprojected_coords = np.zeros(np.array(shape_coords).shape)
            self._shape = Point2D(
                np.array(shape_coords).reshape(-1, 2))
            self._reprojected_shape = Point2D(
                np.array(reprojected_coords).reshape((-1, 2)))
            self._reprojected_shape.set_origin_vertices(
                np.array(vector_coords).reshape((-1, 3)))
        self._id = int(json_dict["id"])
        self._is_on_road = json_dict['is_on_road']
    def to_json_dict(self) -> Dict:
        """Serialize back to the JSON dict format read by from_json_dict."""
        # TODO: Maybe inherit to remove the judgements.
        if self.is_line_correspondence():
            shape_coords = self._shape.coords().tolist()
            vector_coords = self._reprojected_shape.origin_vertices().tolist()
            shape_key = "annotation_segment"
            vector_key = "hd_map_segment"
        else:
            shape_coords = self._shape.coords()[0].tolist()
            vector_coords = self._reprojected_shape.origin_vertices()[0].tolist()
            shape_key = "annotation_point"
            vector_key = "hd_map_point"
        json_dict = {
            shape_key: shape_coords,
            vector_key: vector_coords,
            'id': self._id,
            'is_on_road': self.is_on_road(),
        }
        return json_dict
    def is_valid(self):
        """Both shapes present and holding the same number of vertices."""
        return self._shape is not None and \
               self._reprojected_shape is not None and \
               self._shape.size() == self._reprojected_shape.size()
    def is_line_correspondence(self):
        """True when the annotated shape has two vertices (a segment)."""
        return self._shape.size() == 2
    # --- simple accessors ---
    def shape(self) -> Shape:
        return self._shape
    def reprojected_shape(self) -> Shape:
        return self._reprojected_shape
    def timestamp(self) -> int:
        return self._timestamp
    def is_on_road(self) -> bool:
        return self._is_on_road
    # --- simple mutators ---
    def set_id(self, id_: int):
        self._id = id_
    def id(self) -> int:
        return self._id
    def set_shape(self, shape: Shape):
        self._shape = shape
    def set_reprojected_shape(self, shape: Shape):
        self._reprojected_shape = shape
    def set_timestamp(self, timestamp: int):
        self._timestamp = timestamp
    def set_is_on_road(self, is_on_road: bool):
        self._is_on_road = is_on_road
    def actor(self) -> CorrespondenceActor:
        return self._actor
    def build_actor(self):
        """Push coordinates and colors into the visualization actor; returns it."""
        self._actor.geometry().set_image_coords(self._shape.coords())
        self._actor.set_correspondence_coords(
            self._reprojected_shape.coords())
        if self._is_on_road:
            # blue = (0, 191, 255)
            # NOTE(review): variable is named 'blue' but holds crimson
            # (220, 20, 60); both branches currently produce the same color.
            blue = (220, 20, 60)
            self._actor.property().set_color(*blue)
            self._actor.set_point_color(blue)
        else:
            pink = (220, 20, 60)
            self._actor.property().set_color(*pink)
            self._actor.set_point_color(pink)
        self._actor.set_correspondence_color((255, 255, 0))
        self._actor.property().set_line_width(5)
        return self._actor
    def __str__(self):
        return 'Correspondence: shape_coords: {}, vector_coords: {}, vector_vertices:{}, timestamp: {}'.format(
            self._shape.coords(), self._reprojected_shape.coords(), self._reprojected_shape.origin_vertices(), self.timestamp())
3538181 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This import is for general library
import os
import threading
# This import is for ROS integration
import rospy
from sensor_msgs.msg import Image,CameraInfo
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
from darknet_ros_msgs.msg import BoundingBoxes,BoundingBox
import cv2
class PersonDetector():
    """ROS node logic: annotate camera frames with YOLO person boxes and run
    an OpenCV contour-detection pipeline on each frame.

    NOTE(review): this file uses Python 2 syntax (`except CvBridgeError, e`);
    it will not parse under Python 3.
    """
    def __init__(self):
        # cv_bridge handles
        self.cv_bridge = CvBridge()
        self.person_bbox = BoundingBox()
        # ROS PARAM
        self.m_pub_threshold = rospy.get_param('~pub_threshold', 0.40)
        # Subscribe
        self.sub_camera_rgb = rospy.Subscriber('/camera/color/image_raw', Image, self.CamRgbImageCallback)
        # self.sub_camera_depth = rospy.Subscriber('/camera/aligned_depth_to_color/image_raw', Image, self.CamDepthImageCallback)
        self.sub_darknet_bbox = rospy.Subscriber('/darknet_ros/bounding_boxes', BoundingBoxes, self.DarknetBboxCallback)
        self.image_pub = rospy.Publisher('/camera/yolo/image_raw', Image, queue_size=1)
        self.distance = rospy.Publisher('/camera/yolo/distance', String, queue_size=1)
        return
    def empty(self, a):
        # No-op callback (e.g. for cv2 trackbars that require one).
        pass
    def stackImages(self, scale, imgArray):
        """Resize and tile a 1-D or 2-D list of images into one mosaic.

        Grayscale images are converted to BGR so they can be stacked with
        color frames; every tile is resized to match the first image.
        """
        rows = len(imgArray)
        cols = len(imgArray[0])
        rowsAvailable = isinstance(imgArray[0], list)
        width = imgArray[0][0].shape[1]
        height = imgArray[0][0].shape[0]
        if rowsAvailable:
            for x in range(0, rows):
                for y in range(0, cols):
                    if imgArray[x][y].shape[:2] == imgArray[0][0].shape[:2]:
                        imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)
                    else:
                        imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]),
                                                    None, scale, scale)
                    if len(imgArray[x][y].shape) == 2: imgArray[x][y] = cv2.cvtColor(imgArray[x][y], cv2.COLOR_GRAY2BGR)
            imageBlank = np.zeros((height, width, 3), np.uint8)
            hor = [imageBlank] * rows
            hor_con = [imageBlank] * rows
            for x in range(0, rows):
                hor[x] = np.hstack(imgArray[x])
            ver = np.vstack(hor)
        else:
            for x in range(0, rows):
                if imgArray[x].shape[:2] == imgArray[0].shape[:2]:
                    imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)
                else:
                    imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None, scale, scale)
                if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)
            hor = np.hstack(imgArray)
            ver = hor
        return ver
    def CamRgbImageCallback(self, rgb_image_data):
        """Per-frame callback: draw the person box, publish the annotated
        image, then run blur -> gray -> Canny -> dilate -> contour display."""
        try:
            rgb_image = self.cv_bridge.imgmsg_to_cv2(rgb_image_data, 'passthrough')
            imgContour = rgb_image.copy()
        except CvBridgeError, e:
            rospy.logerr(e)
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)
        # If a person was detected
        if self.person_bbox.probability > 0.0 :
            # For now, take the depth at the center of the BoundingBox (to be improved later)
            cv2.rectangle(rgb_image, (self.person_bbox.xmin, self.person_bbox.ymin), (self.person_bbox.xmax, self.person_bbox.ymax),(0,0,255), 2)
            text=""
            text_top = (self.person_bbox.xmin, self.person_bbox.ymin - 10)
            text_bot = (self.person_bbox.xmin + 80, self.person_bbox.ymin + 5)
            text_pos = (self.person_bbox.xmin + 5, self.person_bbox.ymin)
            cv2.rectangle(rgb_image, text_top, text_bot, (0,0,0),-1)
            cv2.putText(rgb_image, text, text_pos, cv2.FONT_HERSHEY_SIMPLEX, 0.35, (255, 0, 255), 1)
            self.image_pub.publish(self.cv_bridge.cv2_to_imgmsg(rgb_image))
        # Gaussian Blur
        imgBlur = cv2.GaussianBlur(rgb_image, (7, 7), 1)
        # Gray
        imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)
        # Canny
        threshold1 = cv2.getTrackbarPos('Threshold1', 'Parameters')
        threshold2 = cv2.getTrackbarPos('Threshold2', 'Parameters')
        # NOTE(review): threshold2 is assigned from threshold1 below -- the
        # 'Threshold2' trackbar value is read but never used; likely a typo.
        imgCanny = cv2.Canny(imgGray, threshold1=threshold1, threshold2=threshold1)
        # Dialation
        kernel = np.ones((5, 5))
        imgDil = cv2.dilate(imgCanny, kernel=kernel, iterations=1)
        #Contours
        self.getContours(imgDil, imgContour)
        imgStack = self.stackImages(0.8, ([rgb_image, imgBlur, imgGray],[imgCanny, imgDil, imgContour]))
        cv2.imshow('Result', imgStack)
        cv2.namedWindow("rgb_image")
        cv2.imshow("rgb_image", rgb_image)
        cv2.waitKey(1)
        # cv2.waitKey(10)
        # cv2.normalize(self.m_depth_image, self.m_depth_image, 0, 32768, cv2.NORM_MINMAX)
        # cv2.namedWindow("depth_image")
        # cv2.imshow("depth_image", self.m_depth_image)
        # cv2.waitKey(10)
        return
    def getContours(self, img, imgContour):
        """Find external contours above the 'Area' trackbar threshold and
        annotate imgContour with outlines, bounding boxes, and stats."""
        contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        # cv2.findContours(input image, contour retrieval mode, contour approximation method)
        for cnt in contours:
            area = cv2.contourArea(cnt)
            areaMin = cv2.getTrackbarPos("Area", "Parameters")
            if area > areaMin:
                # Draw the contour outline
                cv2.drawContours(imgContour, cnt, -1, (255, 0, 255), 7)
                peri = cv2.arcLength(cnt, True)
                approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
                print(len(approx))
                x, y, w, h = cv2.boundingRect(approx)
                cv2.rectangle(imgContour, (x, y), (x + w, y + h), (0, 255, 0), 5)
                cv2.putText(imgContour, "Points: " + str(len(approx)), (x + w + 20, y + 20), cv2.FONT_HERSHEY_COMPLEX, .7,
                            (0, 255, 0), 2)
                cv2.putText(imgContour, "Area: " + str(int(area)), (x + w + 20, y + 45), cv2.FONT_HERSHEY_COMPLEX, 0.7,
                            (0, 255, 0), 2)
    def DarknetBboxCallback(self, darknet_bboxs):
        """Keep the last 'person' box whose confidence meets the threshold
        (resets to an empty BoundingBox when none qualifies)."""
        bboxs = darknet_bboxs.bounding_boxes
        person_bbox = BoundingBox()
        if len(bboxs) != 0 :
            for i, bb in enumerate(bboxs) :
                if bboxs[i].Class == 'person' and bboxs[i].probability >= self.m_pub_threshold:
                    person_bbox = bboxs[i]
        self.person_bbox = person_bbox
if __name__ == '__main__':
    try:
        # Register the node, build the detector (subscriptions start in its
        # __init__), and hand control to the ROS event loop.
        rospy.init_node('person_detector', anonymous=True)
        idc = PersonDetector()
        rospy.loginfo('idc Initialized')
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
| StarcoderdataPython |
12858971 | '''**********************************************
CODE TO IMPLEMENT FISHER'S LDA -
Given two dimensional dataset with two classes 0 and 1,
Perform Fisher's LDA on the dataset,
Perform dimensionality reduction and find the suitable vector to project it onto,
Find the threshold value for separation of the two classes
***********************************************'''
import numpy as np
import matplotlib.pyplot as plt
import time
# to calculate the execution time of th clustering
start_time = time.time()
# reading data csv file
my_data = np.genfromtxt('datasets/dataset_3.csv', delimiter=',')
# deleting the serial number column
data=np.delete(my_data,0,1)
# separating the two classes and deleting the target variable column
class0 = data[np.nonzero(data[:,2] == 0)]
class1=data[np.nonzero(data[:,2]==1)]
class0=np.delete(class0,2,1)
class1=np.delete(class1,2,1)
# finding the mean of the two classes
mean0=np.mean(class0,0)
mean1=np.mean(class1,0)
''' calculating the variability of the two classes using the formula :
variability=summation over points belonging to class 1((xi-mean)(xi-mean)tanspose)
'''
# NOTE(review): np.dot(temp, temp.T) on a 1-D vector is an inner product, so
# var0/var1 accumulate a *scalar* scatter, not the 2x2 scatter matrix the
# comment above suggests -- confirm this simplification is intended.
var0=np.zeros(1)
temp=np.array(mean0)
for i in range (class0.shape[0]) :
    temp=(class0[i,:]-mean0)
    var0+=np.dot(temp, temp.T)
var1=np.zeros(1)
temp=np.array(mean1)
for i in range (class1.shape[0]) :
    temp=(class1[i,:]-mean1)
    var1+=np.dot(temp, temp.T)
sw=var1+var0
# calculating the inverse of Sw matrix (scalar reciprocal, see note above)
invsw=np.array([(1/sw[0])])
# calculating the w vector using below formula
w=invsw*(mean1-mean0)
# declaring arrays for storing points' distance from the vector
dist0=np.zeros((class0.shape[0],1))
dist1=np.zeros((class1.shape[0],1))
# finding the vector to project the points on;
# such that the means are farthest from each other
wperp=np.array([-w[1],w[0]])
# finding the norm of the w vector
norm_w=np.linalg.norm(wperp)
''' calculating the distance of original data points from the vector using the formula:
r=w.T/norm(w)
'''
for i in range(dist0.shape[0]):
    dist0[i]=np.dot(wperp.T,class0[i,:])/norm_w
for i in range(dist1.shape[0]):
    dist1[i]=np.dot(wperp.T,class1[i,:])/norm_w
''' declaring the arrays to store the projected points data using formula:
x_projected = x_actual-r*w/norm(w)
'''
class0proj=np.zeros((class0.shape[0],2))
class1proj=np.zeros((class1.shape[0],2))
for i in range(class0.shape[0]):
    class0proj[i,:]=np.subtract((class0[i,:]),(dist0[i]*wperp.T/norm_w))
for i in range(class1.shape[0]):
    class1proj[i,:]=np.subtract((class1[i,:]),(dist1[i]*wperp.T/norm_w))
# displaying the plot with the original data , projected points and line
plt.scatter(class0[:,0],class0[:,1])
plt.scatter(class1[:,0],class1[:,1])
plt.scatter(class0proj[:,0],class0proj[:,1],color='blue')
plt.scatter(class1proj[:,0],class1proj[:,1],color='red')
#concatenating the two classes into a single array
pointsproj=np.concatenate((class0proj,class1proj),axis=0)
plt.plot(pointsproj[:,0],pointsproj[:,1],'m')
# storing dimensionally reduced projected points in array using formula:
# y(x) = w.T*x
newproj0=np.zeros((class0.shape[0],1))
newproj1=np.zeros((class1.shape[0],1))
for i in range(class0.shape[0]):
    newproj0[i,:]=np.dot(wperp.T,class0[i,:])
for i in range(class1.shape[0]):
    newproj1[i,:]=np.dot(wperp.T,class1[i,:])
# storing the means and standard deviations of the projected points
proj0mean=np.mean(newproj0)
proj1mean=np.mean(newproj1)
proj0std=np.std(newproj0)
proj1std=np.std(newproj1)
'''
Below function "solve" to finds the threshold value separating the two
classes when dimensionally reduced -
input : m1, m2 - means of the two classes whose point of intersection needs to be found
std1, std2 - the standard deviations of the two classes
'''
def solve(m1, m2, std1, std2):
    """Return the decision threshold between two 1-D Gaussian class models.

    Solves N(x; m1, std1) == N(x; m2, std2) for x (a quadratic in x) and
    returns the intersection point lying strictly between the two means --
    the equal-prior Bayes threshold. When std1 == std2 the quadratic
    degenerates to a linear equation and np.roots yields a single root,
    which is returned as a 1-element array (preserving the original API).

    Parameters
    ----------
    m1, m2 : float
        Means of the two projected class distributions.
    std1, std2 : float
        Standard deviations of the two projected class distributions.
    """
    a = 1/(2*std1**2) - 1/(2*std2**2)
    b = m2/(std2**2) - m1/(std1**2)
    c = m1**2 /(2*std1**2) - m2**2 / (2*std2**2) - np.log(std2/std1)
    roots = np.roots([a, b, c])
    if roots.shape[0] > 1:
        # BUG FIX: the original combined the two comparisons with `or`, which
        # is true for any root distinct from both extremes, so the
        # intersection *outside* the means could be returned depending on the
        # (unspecified) ordering of np.roots. Select the root strictly
        # between the two means with an interval test instead.
        for root in roots:
            if min(m1, m2) < root < max(m1, m2):
                return root
    else:
        return roots
# Compute the class-separation threshold in the 1-D projected space, report
# it with the elapsed time, and save the scatter/projection figure.
threshold=solve(proj0mean,proj1mean,proj0std,proj1std)
print("Threshold value =", threshold)
print("Time taken = ",(time.time()-start_time))
plt.savefig('Results/Result3.png')
| StarcoderdataPython |
3567049 | import copy
import time
from time import sleep
import math3d as m3d
import urx
from math import pi
import numpy as np
# from GTac_Data import gtac_data
import GTac_Data
def safty_check(robot, goal_tool, x_lim, y_lim, z_lim):
    """Check that applying `goal_tool` (tool frame, meters) would keep the
    TCP inside the axis-aligned workspace box x_lim/y_lim/z_lim (base frame,
    exclusive bounds).

    Returns (all_axes_ok, [x_ok, y_ok, z_ok]).
    """
    pose = robot.get_pose() * m3d.Transform(goal_tool)
    print('Safty check: Estimated final pose:{}'.format(pose))
    limits = (x_lim, y_lim, z_lim)
    xyz_result = [
        min(lim) < pose.pos[axis] < max(lim)
        for axis, lim in enumerate(limits)
    ]
    final_result = all(xyz_result)
    print('Safety check: {}, {}'.format(final_result, xyz_result))
    return final_result, xyz_result
def rob_movel_tool(robot, goal_tool, acc=0.02, vel=0.02, wait=True):
    """Execute a linear move expressed in the tool frame.

    A zero goal_tool is a no-op. On failure the exception is swallowed after
    a console message; the finally block always logs the commanded goal and
    the resulting pose. With wait=False, busy-waits until the robot reports a
    running program (i.e. until the command has been picked up).
    """
    if goal_tool != [0 for _ in goal_tool]:
        try:
            goal_tool_transformed = m3d.Transform(goal_tool)
            print('To execute goal_tool:{}'.format(goal_tool_transformed))
            estimeted_final_pose = robot.get_pose() * goal_tool_transformed
            print('Estimated final pose:{}'.format(estimeted_final_pose))
            # rob.movel((0, 0, 0, 0, 0 , 0), acc=0.01, vel=0.01, relative=True, wait=False) # move relative to current pose
            robot.movel_tool(goal_tool_transformed, acc=acc, vel=vel,
                             wait=wait) # move linear to given pose in tool coordinate
            # rob.set_pose(tool_orient_normal, acc=0.01, vel=0.01)
            # rob.translate_tool((0.02, 0, 0), wait=False)
        except:  # NOTE(review): bare except hides all errors (even Ctrl-C) -- narrow this
            print("Robot could not execute move (emergency stop for example), do something")
        finally:
            print('UR 10 moved in tool base: {}'.format(goal_tool))
            trans = robot.get_pose()
            print('current pose::{}'.format(trans))
        if not wait:
            while True:
                sleep(0.01) #sleep first since the robot may not have processed the command yet
                print(robot.is_program_running())
                if robot.is_program_running():
                    break
def safe_movel_tool(robot, goal_tool, x_lim, y_lim, z_lim, acc=0.02, vel=0.02, wait=True):
    """Move the tool only when the goal is non-zero and the workspace safety
    check passes; otherwise do nothing."""
    if goal_tool == [0 for _ in goal_tool]:
        return
    is_safe, _ = safty_check(robot, goal_tool, x_lim, y_lim, z_lim)
    if is_safe:
        rob_movel_tool(robot, goal_tool, acc=acc, vel=vel, wait=wait)
def main_ur10_repeat(goal_tool=None, times=1, boundary=[[0, 0.15], [-1.08, -1.2], [-0.17, 0.1]], acc=0.02, vel=0.02, wait=True):
    """Connect to the UR10 and execute `goal_tool` back and forth `times`
    times, negating the command after each execution so the robot oscillates.

    boundary is [[x_min,x_max],[y_min,y_max],[z_min,z_max]] in the base
    frame, enforced by safe_movel_tool.
    NOTE(review): `boundary` is a mutable default argument -- shared across
    calls if ever mutated.
    """
    print('try to connect ur10')
    rob = urx.Robot("192.168.2.100")
    rob.set_tcp((0, 0, 0.225, 0, 0, 0))
    rob.set_payload(0.5, (0, 0, 0.225))
    sleep(0.2) # leave some time to robot to process the setup commands
    tool_orient_normal = m3d.Transform()
    print('ur10 connected')
    print(rob.is_program_running())
    i = 0
    while i < times:
        time.sleep(0.01)
        # i += 1
        ts = time.time()
        print('rob.is_program_running: {}'.format(rob.is_program_running()))
        # Only issue the next command once the previous one has finished.
        if not rob.is_program_running():
            safe_movel_tool(robot=rob, goal_tool=goal_tool,
                            x_lim=boundary[0],
                            y_lim=boundary[1],
                            z_lim=boundary[2],
                            acc=acc,
                            vel=vel,
                            wait=wait)
            i += 1
            goal_tool = [-x for x in goal_tool] # inverse the command to repeat
        # time.sleep(1)
        trans = rob.get_pose()
        print('{}-current pose::{}'.format(i, trans))
    rob.close()
def robot_stop(robot, stop_time=0.3):
    """Issue a linear stop, then pause `stop_time` seconds so the next motion
    command does not start while the robot is still settling."""
    robot.stopl()
    print('Stopping the robot')
    time.sleep(stop_time)  # wait a while to start the next command to avoid shock.
def main_ur10_thread(q_ur=None, dq_ur10_cmd_exc=None, data_points=5000, boundary=[[0, 0.15], [-1.08, -1.2], [-0.17, 0.1]], acc=0.05, vel=0.05, wait=False, dq_stop_sign=None):
    """Worker-thread loop: pull tool-frame goal commands from queue `q_ur`
    and execute them on the UR10 for up to `data_points` iterations.

    - 'stop' commands trigger robot_stop; zero vectors are ignored.
    - dq_ur10_cmd_exc (deque) counts executed commands; its last element is
      incremented after each successful move.
    - dq_stop_sign (deque) with a trailing True terminates the loop early.
    NOTE(review): `boundary` is a mutable default argument.
    """
    print('try to connect ur10')
    rob = urx.Robot("192.168.1.100")
    rob.set_tcp((0, 0, 0.225, 0, 0, 0))
    rob.set_payload(0.5, (0, 0, 0.25))
    sleep(0.2) # leave some time to robot to process the setup commands
    tool_orient_normal = m3d.Transform()
    print('ur10 connected')
    i = 0
    preivous_time = 0
    ts = time.time()
    goal_tool = [0, 0, 0, 0, 0, 0]
    while i < data_points:
        if dq_stop_sign is not None and len(dq_stop_sign) > 0 and dq_stop_sign[-1] == True:
            break
        i += 1
        time.sleep(0.01)
        preivous_time = time.time()
        try:
            # print('rob.is_program_running: {}'.format(rob.is_program_running()))
            if not q_ur.empty():
                goal_tool = q_ur.get(timeout=0.1)
                q_ur.task_done()
                # print('{} ms: UR10 got new goal tool:{} '.format(round(preivous_time-ts, 3)*1000, goal_tool))
                if goal_tool == 'stop':
                    robot_stop(robot=rob, stop_time=0.3)
                else:
                    if not rob.is_program_running():
                        if goal_tool != [0 for _ in goal_tool]:
                            safe_movel_tool(robot=rob, goal_tool=goal_tool,
                                            x_lim=boundary[0],
                                            y_lim=boundary[1],
                                            z_lim=boundary[2],
                                            acc=acc,
                                            vel=vel,
                                            wait=wait)
                        exc = dq_ur10_cmd_exc[-1]
                        dq_ur10_cmd_exc.append(copy.copy(exc)+1) # marker one more execution
                        # print('dq_ur10_cmd_exc: {}, goal_tool: {}'.format(dq_ur10_cmd_exc, goal_tool))
                        goal_tool = [0, 0, 0, 0, 0, 0] # init the command after being sent
                    else:
                        print('The previous UR command has not been completed')
        except:  # NOTE(review): bare except silently drops queue/robot errors -- narrow this
            continue
        # print('{}:{}'.format(i, rob.get_pose()))
    # while True:
    #     sleep(0.1) #sleep first since the robot may not have processed the command yet
    #     if rob.is_program_running():
    #         break
    rob.close()
def UR10_leave_admittance_mode():
    # Placeholder exit condition for the admittance loop. Always True, so the
    # admittance thread currently leaves after a single iteration -- replace
    # with a real flag/deque check when wiring this up.
    return True
# Enter admittance mode.
# input: m -- apparent mass of the robot
#        k -- stiffness of the robot
#        b -- damping coefficient of the robot
#        force_sensor -- sensor indicator
# output: null
def Read_force_sensor(q_gtac=None, amp=100):
    """Fetch one GTac frame from queue `q_gtac` and pack the finger-4/sec-0
    forces into a 6x1 vector [Fx, Fy, Fz, 0, 0, 0], scaled down by `amp`.

    The torque rows are left at zero; only the three force channels are read.
    """
    data_gtac = q_gtac.get(timeout=1)
    q_gtac.task_done()
    sec_data, _ = GTac_Data.gtac_data.find_sec_data(data_frame_array=data_gtac, finger=4, sec=0)
    forces = np.zeros([6, 1])
    forces[0] = sec_data[-3] / amp
    forces[1] = sec_data[-2] / amp
    forces[2] = sec_data[-1] / amp
    print('UR10 admittance got: {}'.format(forces))
    return forces
def UR10_enter_admittance_thread(q_gtac=None, m=20, k=0, b=50):
    """Run an admittance-control loop on the UR10 until told to leave.

    Implements the virtual model ``m*a + b*v + k*x = F``: each cycle reads a
    force sample, integrates acceleration/velocity trapezoidally, and streams
    the resulting velocity command to the robot.

    Args:
        q_gtac: queue delivering GTac sensor frames (consumed by
            Read_force_sensor).
        m: apparent mass of the virtual admittance model.
        k: virtual stiffness.
        b: virtual damping coefficient.
    """
    print('try to connect ur10')
    rob = urx.Robot("192.168.1.100")
    rob.set_tcp((0, 0, 0.225, 0, 0, 0))
    rob.set_payload(0.5, (0, 0, 0.25))
    sleep(0.2)  # leave some time to robot to process the setup commands
    print('ur10 connected')
    array_size = 5
    # Ring buffers holding the most recent integration states (6x1 vectors).
    acc = np.zeros([array_size, 6, 1])
    vel = np.zeros([array_size, 6, 1])
    pos = np.zeros([array_size, 6, 1])
    front = 0  # slot written this cycle
    rear = 0   # slot written the previous cycle
    while True:
        begin = time.time()
        period = 0.01  # control period in seconds
        force_formated = Read_force_sensor(q_gtac=q_gtac, amp=100)
        # Admittance model: a = (F - b*v - k*x) / m
        acc[front] = (force_formated - b*vel[rear] - k*pos[rear])/m
        # Trapezoidal integration of acceleration and velocity.
        vel[front] = period*(acc[front] + acc[rear])/2 + vel[rear]
        pos[front] = period*(vel[front] + vel[rear])/2 + pos[rear]
        # BUG FIX: vel and acc are numpy arrays, so they must be indexed with
        # [front]; the original `vel(front)` / `acc(front)` called them like
        # functions and raised TypeError on the first cycle.
        rob.speedx("speedl", vel[front], acc[front])
        rear = front
        front = front + 1
        # BUG FIX: wrap at array_size (valid indices are 0..array_size-1).
        # The previous `>` comparison allowed front == array_size, which
        # would raise IndexError on the following iteration.
        if front >= array_size:
            front = 0
        # Sleep out the remainder of the control period, if any time is left.
        time.sleep((begin + period - time.time()) if time.time() - begin < period else 0)
        if UR10_leave_admittance_mode():
            break
    print("have leave the admittance mode")
    rob.close()
def init_ur_handover(pos, acc=0.02, vel=0.01,):
    """Connect to the UR10 and move it to the initial handover pose.

    Args:
        pos: target tool pose (math3d.Transform).
        acc: move acceleration.
        vel: move velocity.
    """
    print('try to connect ur10')
    rob = urx.Robot("192.168.1.100")
    rob.set_tcp((0, 0, 0.225, 0, 0, 0))
    rob.set_payload(0.5, (0, 0, 0.25))
    sleep(0.2)  # leave some time to robot to process the setup commands
    print('ur10 connected')
    try:
        rob.set_pose(pos, acc, vel, wait=False)
    except Exception:
        print("Robot could not execute move (emergency stop for example), do something")
    finally:
        # BUG FIX: this used to print the undefined name `goal_tool`, raising
        # NameError on every call (the finally block always runs); report the
        # requested pose instead.
        print('UR 10 moved to pose: {}'.format(pos))
        trans = rob.get_pose()
        print('current pose::{}'.format(trans))
    rob.close()
def main_move_in_loop(loop,):
    """Execute each tool-space displacement in *loop* on the UR10.

    Per-step acceleration/velocity are looked up by step index; after the
    first step the routine pauses for 4 seconds (presumably to let the
    grasp settle -- confirm against the demo procedure). Relies on the
    module-level ``boundary`` workspace limits.
    """
    step_acc = {0: 0.06, 1: 0.03, 2: 0.02, 3: 0.06}
    step_vel = {0: 0.06, 1: 0.03, 2: 0.02, 3: 0.06}
    for idx, displacement in enumerate(loop):
        print('Executing {} in tool space'.format(displacement))
        main_ur10_repeat(displacement, times=1, boundary=boundary,
                         acc=step_acc[idx], vel=step_vel[idx], wait=True)
        if idx == 0:
            time.sleep(4)
if __name__ == '__main__':
    # main_ur10_thread()
    # Tool-frame displacement commands [dx, dy, dz, rx, ry, rz].
    goal_tool1 = [-0.1, 0, 0.05, 0, 0, 0]  # move the robot in tool cord
    goal_tool2 = [0, 0.1, 0, 0, 0, 0]  # move the robot in tool cord
    goal_tool3 = [0, -0.12, 0, 0, 0, 0]  # move the robot in tool cord
    goal_tool4 = [0.1, 0.02, -0.05, 0, 0, 0]  # move the robot in tool cord
    goal_tool_adj = [0.05, 0, -0.1, 0, 0, 0]  # move the robot in tool cord
    # Ordered displacements executed by main_move_in_loop (egg-grasp demo).
    loop_egg_gsp = [goal_tool1, goal_tool2, goal_tool3, goal_tool4]
    # Workspace limits [[x_min, x_max], [y_min, y_max], [z_min, z_max]],
    # read as a module-level global by main_move_in_loop.
    boundary = [[-0.05, 0.15], [-1.08, -1.4], [-0.192, 0.1]]
    # Initial handover pose (rotation matrix + position) -- presumably
    # recorded from a taught robot configuration; verify before reuse.
    init_pos = m3d.Transform()
    init_pos.orient = np.array([[0.99112324, 0.10186745, -0.08542689],
                                [-0.08254139, -0.03222018, -0.99606665],
                                [-0.10421924, 0.99427606, -0.02352589]])
    init_pos.pos = [0.13108, -1.12309, -0.07]
    acc = 0.02
    vel = 0.02
    main_move_in_loop(loop=loop_egg_gsp)
    # main_ur10_repeat(goal_tool4, times=1, boundary=boundary, acc=acc, vel=vel, wait=True)
    # init_ur_handover(init_pos)
| StarcoderdataPython |
5088938 | <reponame>alvesmatheus/reconhecimento-temas-comissoes
import pandas as pd
# Extract event-level metadata columns from the committee-transcription dump
# and write them to a separate CSV.
PATH_TRANSCRICOES = './data/2009-2018/00-transcricoes.csv'
PATH_METADADOS = './data/2009-2018/01-metadados.csv'

METADATA_COLUMNS = ['id_evento', 'comissao', 'ano', 'data', 'categoria_evento']

transcricoes = pd.read_csv(PATH_TRANSCRICOES)
metadados = transcricoes.filter(items=METADATA_COLUMNS)
metadados.to_csv(PATH_METADADOS, index=False)
| StarcoderdataPython |
# Newton-Raphson iteration budget and flat-slope tolerance for the
# cubic-bezier easing solver below.
MAX_ITERATIONS = 15
TOLERANCE = 0.001
# https://en.wikipedia.org/wiki/B%C3%A9zier_curve
# P0(0, 0), P1(x1, y1), P2(x2, y2), P3(1, 1)
# B(t) = ((1-t)^3)*P0 + (3t(1-t)^2)*P1 + (3t^2(1-t))*P2 + t^3*P3
# = (3t(1-t)^2)*P1 + (3t^2(1-t))*P2 + t^3
# Returns f(t) or z(t) by given value x1, x2 or y1, y2
def calculateBezier(t, Z1, Z2):
    """Evaluate one coordinate of a cubic Bezier with P0 = 0 and P3 = 1.

    B(t) = 3t(1-t)^2 * Z1 + 3t^2(1-t) * Z2 + t^3, where Z1 and Z2 are the
    matching coordinates (x or y) of the two inner control points.
    """
    u = 1 - t
    return 3.0 * t * Z1 * u ** 2 + 3.0 * t ** 2 * Z2 * u + t ** 3
# B`(t) = (3(1-t)^2)*P1 + (6t(1-t))(P2-P1) + 3t^2(P3-P2)
# Remember that P3 is always (1, 1)
# Returns dx/dt or dy/dt by given value x1, x2 or y1, y2
def calculateSlope(t, Z1, Z2):
    """Derivative dB/dt of the cubic Bezier coordinate (P0 = 0, P3 = 1).

    B'(t) = 3(1-t)^2 * Z1 + 6t(1-t)(Z2 - Z1) + 3t^2(1 - Z2).
    """
    end_point = 1.0  # third control-point coordinate (P3 is fixed at 1)
    u = 1 - t
    return (3.0 * Z1 * u ** 2
            + 6.0 * t * u * (Z2 - Z1)
            + 3.0 * t ** 2 * (end_point - Z2))
def newtonRaphson(X_target, X_initial, X1, X2):
    """Solve B(t) = X_target for t by Newton-Raphson iteration.

    Starts from X_initial and refines for at most MAX_ITERATIONS steps,
    bailing out early when the local slope is too flat to divide by safely.
    """
    estimate = X_initial
    for _ in range(MAX_ITERATIONS):
        slope = calculateSlope(estimate, X1, X2)
        if abs(slope) <= TOLERANCE:
            # Nearly horizontal tangent: stop rather than divide by ~0.
            break
        residual = calculateBezier(estimate, X1, X2) - X_target
        estimate = estimate - residual / slope
    return estimate
def easing(X, X1, Y1, X2, Y2):
    """Map progress X in [0, 1] through a cubic-bezier(X1, Y1, X2, Y2) curve.

    Equivalent to the CSS ``cubic-bezier`` timing function: solve the Bezier
    x-coordinate for the parameter t, then evaluate the y-coordinate at t.

    Raises:
        ValueError: if X1 or X2 lies outside [0, 1] (the curve would not be
            a function of X). ValueError is a subclass of Exception, so
            callers that caught the previous generic Exception still work.
    """
    if X1 < 0 or X1 > 1 or X2 < 0 or X2 > 1:
        raise ValueError("X Values should be in range [0, 1]")
    if X1 == Y1 and X2 == Y2:
        # Control points on the diagonal: the curve degenerates to y = x.
        return X
    # TODO: Find optimized initial value instead of hard-coded 0.5
    T = newtonRaphson(X, 0.5, X1, X2)
    return calculateBezier(T, Y1, Y2)
3329547 | # -*- coding: utf-8 -*-
from openerp import models,fields,api,tools,SUPERUSER_ID
from openerp.tools.translate import _
import time
from datetime import datetime
from openerp.osv import osv
import xmlrpclib
class is_type_equipement(models.Model):
    # Equipment-type referential; records are listed alphabetically by name.
    _name='is.type.equipement'
    _order='name'
    name = fields.Char("Name", required=True)
class is_moyen_fabrication(models.Model):
    """Moyen de fabrication (manufacturing resource, e.g. a press or tool).

    Every create/write on this model is mirrored over XML-RPC to each remote
    Odoo database declared in ``is.database``; mirrored rows are matched by
    ``is_database_origine_id`` (the id of the record in the source database).
    """
    _name='is.moyen.fabrication'
    _order='name'
    _sql_constraints = [('name_uniq','UNIQUE(name)', u'Ce code existe déjà')]

    name = fields.Char("Code", required=True)
    type_equipement = fields.Many2one('is.type.equipement', string='Type équipement', required=True)
    type_equipement_name = fields.Char('Type équipement name' , related='type_equipement.name', readonly=True)
    lieu_changement = fields.Selection([('presse', 'sur presse'), ('mecanique', 'en mécanique')], "Lieu changement")
    designation = fields.Char("Désignation", required=False)
    mold_ids = fields.Many2many('is.mold' ,'is_moyen_fabrication_id' , 'is_mold_id_fabric' , string='Moule')
    dossierf_ids = fields.Many2many('is.dossierf','is_moyen_fabrication_dossierf_id', 'is_dossierf_id_fabric', string='Dossier F')
    base_capacitaire = fields.Char("Base capacitaire")
    site_id = fields.Many2one('is.database', string='Site')
    emplacement = fields.Char("Emplacement")
    fournisseur_id = fields.Many2one('res.partner', string='Fournisseur', domain=[('supplier','=',True),('is_company','=',True)])
    ref_fournisseur = fields.Char("Réf fournisseur")
    date_creation = fields.Date('Date de création')
    date_fin = fields.Date('Date de fin')
    # Id of the record in the database it was originally created in; used to
    # match mirrored rows across databases.
    is_database_origine_id = fields.Integer("Id d'origine", readonly=True)
    active = fields.Boolean('Active', default=True)

    @api.multi
    def write(self, vals):
        """Standard write, then propagate the record to all remote databases."""
        try:
            res=super(is_moyen_fabrication, self).write(vals)
            for obj in self:
                obj.copy_other_database_moyen_fabrication()
            return res
        except Exception as e:
            raise osv.except_osv(_('Fabrication!'),
                _('(%s).') % str(e).decode('utf-8'))

    @api.model
    def create(self, vals):
        """Standard create, then propagate the record to all remote databases."""
        try:
            obj=super(is_moyen_fabrication, self).create(vals)
            obj.copy_other_database_moyen_fabrication()
            return obj
        except Exception as e:
            raise osv.except_osv(_('Fabrication!'),
                _('(%s).') % str(e).decode('utf-8'))

    @api.multi
    def copy_other_database_moyen_fabrication(self):
        """Mirror each record in ``self`` to every configured remote database.

        For every ``is.database`` row with complete connection settings,
        connect over XML-RPC as the superuser and create or update the
        counterpart record (matched on ``is_database_origine_id``).
        """
        cr , uid, context = self.env.args
        context = dict(context)
        database_obj = self.env['is.database']
        database_lines = database_obj.search([])
        for fabrication in self:
            for database in database_lines:
                # Skip databases whose connection settings are incomplete.
                if not database.ip_server or not database.database or not database.port_server or not database.login or not database.password:
                    continue
                DB = database.database
                USERID = SUPERUSER_ID
                DBLOGIN = database.login
                # BUG FIX: this assignment had been mangled to the invalid
                # token `<PASSWORD>` (a SyntaxError). The remote password
                # comes from the is.database record, consistent with the
                # completeness check above.
                USERPASS = database.password
                DB_SERVER = database.ip_server
                DB_PORT = database.port_server
                sock = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/object' % (DB_SERVER, DB_PORT))
                moyen_fabrication_vals = self.get_moyen_fabrication_vals(fabrication, DB, USERID, USERPASS, sock)
                # Update the mirrored record if it already exists (searching
                # both active and archived rows), otherwise create it.
                dest_moyen_fabrication_ids = sock.execute(DB, USERID, USERPASS, 'is.moyen.fabrication', 'search', [('is_database_origine_id', '=', fabrication.id),
                                             '|',('active','=',True),('active','=',False)], {})
                if dest_moyen_fabrication_ids:
                    sock.execute(DB, USERID, USERPASS, 'is.moyen.fabrication', 'write', dest_moyen_fabrication_ids, moyen_fabrication_vals, {})
                    moyen_fabrication_created_id = dest_moyen_fabrication_ids[0]
                else:
                    moyen_fabrication_created_id = sock.execute(DB, USERID, USERPASS, 'is.moyen.fabrication', 'create', moyen_fabrication_vals, {})
        return True

    @api.model
    def get_moyen_fabrication_vals(self, fabrication, DB, USERID, USERPASS, sock):
        """Build the values dict to create/update the mirrored record in DB.

        Relational fields are translated to the ids of the corresponding
        mirrored records in the destination database. A mirrored record is
        only active in the database of its own site.
        """
        moyen_fabrication_vals ={
            'name'                  : tools.ustr(fabrication.name or ''),
            'type_equipement'       : self._get_type_equipement(fabrication, DB, USERID, USERPASS, sock),
            'lieu_changement'       : tools.ustr(fabrication.lieu_changement or ''),
            'designation'           : tools.ustr(fabrication.designation or ''),
            'mold_ids'              : self._get_mold_ids(fabrication , DB, USERID, USERPASS, sock),
            'dossierf_ids'          : self._get_dossierf_ids(fabrication , DB, USERID, USERPASS, sock),
            'base_capacitaire'      : tools.ustr(fabrication.base_capacitaire or ''),
            'emplacement'           : tools.ustr(fabrication.emplacement or '') ,
            'fournisseur_id'        : self._get_fournisseur_id(fabrication, DB, USERID, USERPASS, sock),
            'ref_fournisseur'       : tools.ustr(fabrication.ref_fournisseur or ''),
            'date_creation'         : fabrication.date_creation,
            'date_fin'              : fabrication.date_fin,
            'site_id'               : self._get_site_id(fabrication, DB, USERID, USERPASS, sock),
            'active'                : fabrication.site_id and fabrication.site_id.database == DB and True or False,
            'is_database_origine_id': fabrication.id,
        }
        return moyen_fabrication_vals

    @api.model
    def _get_site_id(self, fabrication, DB, USERID, USERPASS, sock):
        """Return the id of the mirrored site record in DB, or False."""
        if fabrication.site_id:
            ids = sock.execute(DB, USERID, USERPASS, 'is.database', 'search', [('is_database_origine_id', '=', fabrication.site_id.id)], {})
            if ids:
                return ids[0]
        return False

    @api.model
    def _get_type_equipement(self, fabrication, DB, USERID, USERPASS, sock):
        """Return the mirrored equipment-type id in DB, replicating it first
        if it does not exist there yet."""
        if fabrication.type_equipement:
            type_equipement_ids = sock.execute(DB, USERID, USERPASS, 'is.type.equipement', 'search', [('is_database_origine_id', '=', fabrication.type_equipement.id)], {})
            if not type_equipement_ids:
                fabrication.type_equipement.copy_other_database_type_equipement()
                type_equipement_ids = sock.execute(DB, USERID, USERPASS, 'is.type.equipement', 'search', [('is_database_origine_id', '=', fabrication.type_equipement.id)], {})
            if type_equipement_ids:
                return type_equipement_ids[0]
        return False

    @api.model
    def _get_mold_ids(self, fabrication , DB, USERID, USERPASS, sock):
        """Return a many2many command with the mirrored mould ids in DB."""
        lst_moule_ids = []
        for moule in fabrication.mold_ids:
            is_mold_ids = sock.execute(DB, USERID, USERPASS, 'is.mold', 'search', [('is_database_origine_id', '=', moule.id)], {})
            if is_mold_ids:
                lst_moule_ids.append(is_mold_ids[0])
        return [(6,0,lst_moule_ids)]

    @api.model
    def _get_dossierf_ids(self, fabrication , DB, USERID, USERPASS, sock):
        """Return a many2many command with the mirrored dossier-F ids in DB."""
        ids = []
        for dossierf in fabrication.dossierf_ids:
            res = sock.execute(DB, USERID, USERPASS, 'is.dossierf', 'search', [('is_database_origine_id', '=', dossierf.id)], {})
            if res:
                ids.append(res[0])
        return [(6,0,ids)]

    @api.model
    def _get_fournisseur_id(self, fabrication, DB, USERID, USERPASS, sock):
        """Return the id of the mirrored supplier partner in DB, or False."""
        if fabrication.fournisseur_id:
            fournisseur_ids = sock.execute(DB, USERID, USERPASS, 'res.partner', 'search', [('is_database_origine_id', '=', fabrication.fournisseur_id.id)], {})
            if fournisseur_ids:
                return fournisseur_ids[0]
        return False
3557654 | <reponame>williambrode/google-resumable-media-python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import time
# Unique-per-run bucket name: the millisecond timestamp keeps concurrent
# system-test runs from colliding.
BUCKET_NAME = "grpm-systest-{}".format(int(1000 * time.time()))
BUCKET_POST_URL = "https://www.googleapis.com/storage/v1/b/"
BUCKET_URL = "https://www.googleapis.com/storage/v1/b/{}".format(BUCKET_NAME)
# Media-download endpoints for objects in the test bucket.
_DOWNLOAD_BASE = "https://www.googleapis.com/download/storage/v1/b/{}".format(
    BUCKET_NAME
)
DOWNLOAD_URL_TEMPLATE = _DOWNLOAD_BASE + "/o/{blob_name}?alt=media"
# Upload endpoints; the uploadType query parameter selects the strategy.
_UPLOAD_BASE = (
    "https://www.googleapis.com/upload/storage/v1/b/{}".format(BUCKET_NAME)
    + "/o?uploadType="
)
SIMPLE_UPLOAD_TEMPLATE = _UPLOAD_BASE + "media&name={blob_name}"
MULTIPART_UPLOAD = _UPLOAD_BASE + "multipart"
RESUMABLE_UPLOAD = _UPLOAD_BASE + "resumable"
METADATA_URL_TEMPLATE = BUCKET_URL + "/o/{blob_name}"
GCS_RW_SCOPE = "https://www.googleapis.com/auth/devstorage.read_write"
# Generated using random.choice() with all 256 byte choices.
# NOTE(review): the first chunk of this key has been redacted to the literal
# placeholder b"<KEY>", so the constant is no longer a valid 32-byte AES-256
# key -- restore the original bytes before running these system tests.
ENCRYPTION_KEY = (
    b"<KEY>"
    b"\x08 Y\x13\xe2\n\x02i\xadc\xe2\xd99x"
)
def get_encryption_headers(key=ENCRYPTION_KEY):
    """Builds customer-supplied encryption key headers

    See `Managing Data Encryption`_ for more details.

    Args:
        key (bytes): 32 byte key to build request key and hash.

    Returns:
        Dict[str, str]: The algorithm, key and key-SHA256 headers.

    .. _Managing Data Encryption:
        https://cloud.google.com/storage/docs/encryption
    """
    digest = hashlib.sha256(key).digest()
    encoded_key = base64.b64encode(key).decode("utf-8")
    encoded_digest = base64.b64encode(digest).decode("utf-8")
    return {
        "x-goog-encryption-algorithm": "AES256",
        "x-goog-encryption-key": encoded_key,
        "x-goog-encryption-key-sha256": encoded_digest,
    }
| StarcoderdataPython |
1933867 | <reponame>LaudateCorpus1/python-logging
# Copyright 2021 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestStructuredLogHandler(unittest.TestCase):
    """Unit tests for StructuredLogHandler's structured (JSON) log output."""
    PROJECT = "PROJECT"
    def _get_target_class(self):
        """Import lazily and return the class under test."""
        from google.cloud.logging.handlers import StructuredLogHandler
        return StructuredLogHandler
    def _make_one(self, *args, **kw):
        """Instantiate the handler under test with the given arguments."""
        return self._get_target_class()(*args, **kw)
    @staticmethod
    def create_app():
        """Build a minimal Flask app used to simulate HTTP request contexts."""
        import flask
        app = flask.Flask(__name__)
        @app.route("/")
        def index():
            return "test flask trace"  # pragma: NO COVER
        return app
    def test_ctor_defaults(self):
        """By default no project id is attached to the handler."""
        handler = self._make_one()
        self.assertIsNone(handler.project_id)
    def test_ctor_w_project(self):
        """An explicit project id is stored as given."""
        handler = self._make_one(project_id="foo")
        self.assertEqual(handler.project_id, "foo")
    def test_format(self):
        """A fully-populated record serializes all structured-log fields."""
        import logging
        import json
        labels = {"default_key": "default-value"}
        handler = self._make_one(labels=labels)
        logname = "loggername"
        message = "hello world,嗨 世界"
        pathname = "testpath"
        lineno = 1
        func = "test-function"
        record = logging.LogRecord(
            logname, logging.INFO, pathname, lineno, message, None, None, func=func
        )
        # The logger name is always folded into the labels.
        expected_labels = {**labels, "python_logger": logname}
        expected_payload = {
            "message": message,
            "severity": record.levelname,
            "logging.googleapis.com/trace": "",
            "logging.googleapis.com/spanId": "",
            "logging.googleapis.com/trace_sampled": False,
            "logging.googleapis.com/sourceLocation": {
                "file": pathname,
                "line": lineno,
                "function": func,
            },
            "httpRequest": {},
            "logging.googleapis.com/labels": expected_labels,
        }
        handler.filter(record)
        result = json.loads(handler.format(record))
        for (key, value) in expected_payload.items():
            self.assertEqual(value, result[key])
        # No extra keys beyond the expected payload may appear.
        self.assertEqual(
            len(expected_payload.keys()),
            len(result.keys()),
            f"result dictionary has unexpected keys: {result.keys()}",
        )
    def test_format_minimal(self):
        """A record with no content still emits every structured field, empty."""
        import logging
        import json
        handler = self._make_one()
        record = logging.LogRecord(None, logging.INFO, None, None, None, None, None,)
        record.created = None
        expected_payload = {
            "severity": "INFO",
            "logging.googleapis.com/trace": "",
            "logging.googleapis.com/spanId": "",
            "logging.googleapis.com/trace_sampled": False,
            "logging.googleapis.com/sourceLocation": {},
            "httpRequest": {},
            "logging.googleapis.com/labels": {},
        }
        handler.filter(record)
        result = json.loads(handler.format(record))
        self.assertEqual(set(expected_payload.keys()), set(result.keys()))
        for (key, value) in expected_payload.items():
            self.assertEqual(
                value, result[key], f"expected_payload[{key}] != result[{key}]"
            )
    def test_format_with_quotes(self):
        """
        When logging a message containing quotes, escape chars should be added
        """
        import logging
        handler = self._make_one()
        message = '"test"'
        expected_result = '\\"test\\"'
        record = logging.LogRecord(None, logging.INFO, None, None, message, None, None,)
        record.created = None
        handler.filter(record)
        result = handler.format(record)
        self.assertIn(expected_result, result)
    def test_format_with_exception(self):
        """
        When logging a message with an exception, the stack trace should not be appended
        """
        import logging
        import json
        handler = self._make_one()
        exception_tuple = (Exception, Exception(), None)
        message = "test"
        record = logging.LogRecord(
            None, logging.INFO, None, None, message, None, exception_tuple
        )
        record.created = None
        handler.filter(record)
        result = json.loads(handler.format(record))
        # Only the exception type name is appended, not the full traceback.
        self.assertEqual(result["message"], f"{message}\nException")
    def test_format_with_line_break(self):
        """
        When logging a message containing \n, it should be properly escaped
        """
        import logging
        handler = self._make_one()
        message = "test\ntest"
        expected_result = "test\\ntest"
        record = logging.LogRecord(None, logging.INFO, None, None, message, None, None,)
        record.created = None
        handler.filter(record)
        result = handler.format(record)
        self.assertIn(expected_result, result)
    def test_format_with_custom_formatter(self):
        """
        Handler should respect custom formatters attached
        """
        import logging
        handler = self._make_one()
        logFormatter = logging.Formatter(fmt="%(name)s :: %(levelname)s :: %(message)s")
        handler.setFormatter(logFormatter)
        message = "test"
        expected_result = "logname :: INFO :: test"
        record = logging.LogRecord(
            "logname", logging.INFO, None, None, message, None, None,
        )
        record.created = None
        handler.filter(record)
        result = handler.format(record)
        self.assertIn(expected_result, result)
        self.assertIn("message", result)
    def test_dict(self):
        """
        Handler should parse json encoded as a string
        """
        import logging
        handler = self._make_one()
        message = {"x": "test"}
        expected_result = '"x": "test"'
        record = logging.LogRecord(
            "logname", logging.INFO, None, None, message, None, None,
        )
        record.created = None
        handler.filter(record)
        result = handler.format(record)
        self.assertIn(expected_result, result)
        # A dict payload replaces the "message" field entirely.
        self.assertNotIn("message", result)
    def test_encoded_json(self):
        """
        Handler should parse json encoded as a string
        """
        import logging
        handler = self._make_one()
        logFormatter = logging.Formatter(fmt='{ "name" : "%(name)s" }')
        handler.setFormatter(logFormatter)
        expected_result = '"name": "logname"'
        record = logging.LogRecord(
            "logname", logging.INFO, None, None, None, None, None,
        )
        record.created = None
        handler.filter(record)
        result = handler.format(record)
        self.assertIn(expected_result, result)
        # JSON-shaped formatter output is merged, not wrapped in "message".
        self.assertNotIn("message", result)
    def test_format_with_arguments(self):
        """
        Handler should support format string arguments
        """
        import logging
        handler = self._make_one()
        message = "name: %s"
        name_arg = "Daniel"
        expected_result = "name: Daniel"
        record = logging.LogRecord(
            None, logging.INFO, None, None, message, name_arg, None,
        )
        record.created = None
        handler.filter(record)
        result = handler.format(record)
        self.assertIn(expected_result, result)
    def test_format_with_request(self):
        """Request metadata is inferred from the X-Cloud-Trace-Context header."""
        import logging
        import json
        handler = self._make_one()
        logname = "loggername"
        message = "hello world,嗨 世界"
        record = logging.LogRecord(logname, logging.INFO, "", 0, message, None, None)
        expected_path = "http://testserver/123"
        expected_agent = "Mozilla/5.0"
        expected_trace = "123"
        expected_span = "456"
        # Legacy header format: TRACE_ID/SPAN_ID;o=SAMPLED
        trace_header = f"{expected_trace}/{expected_span};o=1"
        expected_payload = {
            "logging.googleapis.com/trace": expected_trace,
            "logging.googleapis.com/spanId": expected_span,
            "logging.googleapis.com/trace_sampled": True,
            "httpRequest": {
                "requestMethod": "GET",
                "requestUrl": expected_path,
                "userAgent": expected_agent,
                "protocol": "HTTP/1.1",
            },
        }
        app = self.create_app()
        with app.test_request_context(
            expected_path,
            headers={
                "User-Agent": expected_agent,
                "X_CLOUD_TRACE_CONTEXT": trace_header,
            },
        ):
            handler.filter(record)
            result = json.loads(handler.format(record))
            for (key, value) in expected_payload.items():
                self.assertEqual(value, result[key])
    def test_format_with_traceparent(self):
        """Request metadata is inferred from the W3C traceparent header."""
        import logging
        import json
        handler = self._make_one()
        logname = "loggername"
        message = "hello world,嗨 世界"
        record = logging.LogRecord(logname, logging.INFO, "", 0, message, None, None)
        expected_path = "http://testserver/123"
        expected_agent = "Mozilla/5.0"
        expected_trace = "4bf92f3577b34da6a3ce929d0e0e4736"
        expected_span = "00f067aa0ba902b7"
        # W3C format: VERSION-TRACE_ID-SPAN_ID-FLAGS
        trace_header = f"00-{expected_trace}-{expected_span}-09"
        expected_payload = {
            "logging.googleapis.com/trace": expected_trace,
            "logging.googleapis.com/spanId": expected_span,
            "logging.googleapis.com/trace_sampled": True,
            "httpRequest": {
                "requestMethod": "GET",
                "requestUrl": expected_path,
                "userAgent": expected_agent,
                "protocol": "HTTP/1.1",
            },
        }
        app = self.create_app()
        with app.test_request_context(
            expected_path,
            headers={"User-Agent": expected_agent, "TRACEPARENT": trace_header},
        ):
            handler.filter(record)
            result = json.loads(handler.format(record))
            for (key, value) in expected_payload.items():
                self.assertEqual(value, result[key])
    def test_format_overrides(self):
        """
        Allow users to override log fields using `logging.info("", extra={})`

        If supported fields were overriden by the user, those choices should
        take precedence.
        """
        import logging
        import json
        default_labels = {
            "default_key": "default-value",
            "overwritten_key": "bad_value",
        }
        handler = self._make_one(labels=default_labels)
        logname = "loggername"
        message = "hello world,嗨 世界"
        record = logging.LogRecord(logname, logging.INFO, "", 0, message, None, None)
        overwrite_path = "http://overwrite"
        inferred_path = "http://testserver/123"
        overwrite_trace = "abc"
        overwrite_span = "def"
        inferred_trace_span = "123/456;o=1"
        overwrite_file = "test-file"
        # Per-record overrides that must win over request-inferred values.
        record.http_request = {"requestUrl": overwrite_path}
        record.source_location = {"file": overwrite_file}
        record.trace = overwrite_trace
        record.span_id = overwrite_span
        record.trace_sampled = False
        added_labels = {"added_key": "added_value", "overwritten_key": "new_value"}
        record.labels = added_labels
        expected_payload = {
            "logging.googleapis.com/trace": overwrite_trace,
            "logging.googleapis.com/spanId": overwrite_span,
            "logging.googleapis.com/trace_sampled": False,
            "logging.googleapis.com/sourceLocation": {"file": overwrite_file},
            "httpRequest": {"requestUrl": overwrite_path},
            "logging.googleapis.com/labels": {
                "default_key": "default-value",
                "overwritten_key": "new_value",
                "added_key": "added_value",
                "python_logger": logname,
            },
        }
        app = self.create_app()
        with app.test_client() as c:
            c.put(
                path=inferred_path,
                data="body",
                headers={"X_CLOUD_TRACE_CONTEXT": inferred_trace_span},
            )
            handler.filter(record)
            result = json.loads(handler.format(record))
            for (key, value) in expected_payload.items():
                self.assertEqual(value, result[key])
    def test_format_with_json_fields(self):
        """
        User can add json_fields to the record, which should populate the payload
        """
        import logging
        import json
        handler = self._make_one()
        message = "name: %s"
        name_arg = "Daniel"
        expected_result = "name: Daniel"
        json_fields = {"hello": "world", "number": 12}
        record = logging.LogRecord(
            None, logging.INFO, None, None, message, name_arg, None,
        )
        record.created = None
        setattr(record, "json_fields", json_fields)
        handler.filter(record)
        result = json.loads(handler.format(record))
        self.assertEqual(result["message"], expected_result)
        self.assertEqual(result["hello"], "world")
        self.assertEqual(result["number"], 12)
| StarcoderdataPython |
5079368 | <filename>python/Itertools/compress-the-string.py<gh_stars>0
# https://www.hackerrank.com/challenges/compress-the-string/problem
from itertools import groupby
# Read one line from stdin and print "(run_length, char)" for each run of
# consecutive identical characters, separated by spaces.
for char, run in groupby(input()):
    run_length = len(list(run))
    print(f"({run_length}, {char})", end=" ")
| StarcoderdataPython |
9620594 | from collections import defaultdict
from biicode.common.find.finder_request import FinderRequest
from biicode.common.model.resource import Resource
from biicode.common.edition.block_holder import BlockHolder
from biicode.common.model.symbolic.reference import ReferencedDependencies
from biicode.common.model.symbolic.block_version_table import BlockVersionTable
from biicode.common.model.symbolic.block_version import BlockVersion
class HiveHolder(object):
    """In-memory view of a hive (user workspace).

    Groups cell/content pairs into one BlockHolder per block name and
    answers dependency queries (unresolved declarations, external
    dependencies, finder requests) over the whole set.
    """

    def __init__(self, dict_cells, dict_contents):
        """Pair every cell with its content (if any) and bucket the
        resulting Resources into one BlockHolder per block name."""
        self.hive_dependencies = None  # MUST ALWAYS BE ASSIGNED before usage
        self.settings = None
        resource_dict = defaultdict(list)
        for block_cell_name, cell in dict_cells.iteritems():
            content = dict_contents.get(block_cell_name)
            resource_dict[block_cell_name.block_name].append(Resource(cell, content))
        self._block_holders = {block_name: BlockHolder(block_name, resources)
                               for block_name, resources in resource_dict.iteritems()}

    def __repr__(self):
        result = []
        for bh in self.block_holders:
            result.append(repr(bh))
        return '\n'.join(result)

    def delete_empty_blocks(self):
        """Drop every block holder that no longer owns any resource."""
        for block_name in self.blocks:
            if not self._block_holders[block_name].resources:
                del self._block_holders[block_name]

    def delete_block(self, block_name):
        del self._block_holders[block_name]

    @property
    def block_holders(self):
        return self._block_holders.itervalues()

    def __getitem__(self, key):
        return self._block_holders[key]

    def add_holder(self, block_holder):
        self._block_holders[block_holder.block_name] = block_holder

    @property
    def versions(self):
        """ given a set of block_names in edition (blocks folder), and a tracking BlockVersionTable
        return the current versions of such edition blocks, that is, time = None
        return: BlockVersionTable{ BlockName: BlockVersion time=None}
        """
        edition_versions = BlockVersionTable()
        for block_holder in self._block_holders.itervalues():
            parent = block_holder.parent
            edition_versions.add_version(BlockVersion(parent.block, None))
        return edition_versions

    @property
    def blocks(self):
        return set(self._block_holders.keys())

    @property
    def resources(self):
        """Flat {name: Resource} mapping over every block holder."""
        result = {}
        for block_holder in self._block_holders.itervalues():
            for resource in block_holder.resources.itervalues():
                result[resource.name] = resource
        return result

    def find_request(self, policy):
        """Build a FinderRequest for the given policy.

        Returns a tuple (request, local_unresolved): declarations pointing
        outside this hive go into the request; declarations that resolve to
        local blocks are reported separately.
        """
        request = FinderRequest(policy)
        request.existing = self.external_dependencies()
        blocks = self.blocks
        # Only those that have a block to be searched for
        unresolved = set()
        local_unresolved = set()
        for block_holder in self.block_holders:
            includes = block_holder.includes
            paths_size = len(block_holder.paths)
            for declaration in self.external_unresolved():
                try:
                    new_declaration, _ = declaration.prefix(includes, paths_size)
                except Exception:
                    # BUG FIX (narrowed from a bare `except:` that also
                    # swallowed KeyboardInterrupt/SystemExit): prefix() can
                    # fail for declarations this block cannot rewrite; fall
                    # back to the original declaration.
                    new_declaration = declaration
                decl_block = new_declaration.block()
                if decl_block and decl_block not in blocks:
                    unresolved.add(new_declaration)
                else:
                    local_unresolved.add(new_declaration)
        request.unresolved = unresolved
        request.block_names = self.blocks
        return request, local_unresolved

    def external_unresolved(self):
        """Union of unresolved declarations over all block holders."""
        unresolved = set()
        for block_holder in self.block_holders:
            unresolved.update(block_holder.unresolved())
        return unresolved

    def external_dependencies(self):
        """Collect, per external block version, the declarations already
        resolved to targets in that version."""
        result = ReferencedDependencies()
        blocks = self.blocks
        for block_holder in self.block_holders:
            dep_table = block_holder.requirements
            for block_name, version in dep_table.iteritems():
                if block_name not in blocks:
                    result[version]  # defaultdict will create empty item
            for resource in block_holder.simple_resources:
                cell = resource.cell
                for declaration in cell.dependencies.resolved:
                    targets = declaration.match(cell.dependencies.explicit)
                    if targets:
                        block = declaration.block()
                        if block not in blocks:
                            block_version = dep_table[block]
                            result[block_version][declaration].update(targets)
        return result
| StarcoderdataPython |
75084 | <gh_stars>10-100
# iterating_over_cells_in_rows.py
from openpyxl import load_workbook
def iterating_over_values(path, sheet_name):
    """Print the top-left 3x3 block of *sheet_name* from the workbook at
    *path*, one row (tuple of Cell objects) per line.

    Prints a notice and returns early if the sheet does not exist.
    """
    workbook = load_workbook(filename=path)
    if sheet_name not in workbook.sheetnames:
        print(f"'{sheet_name}' not found. Quitting.")
        return
    worksheet = workbook[sheet_name]
    # values_only=False yields Cell objects rather than raw cell values.
    rows = worksheet.iter_rows(min_row=1, max_row=3,
                               min_col=1, max_col=3,
                               values_only=False)
    for row in rows:
        print(row)
if __name__ == "__main__":
    # Demo: dump the first 3x3 cells of the sample workbook's book sheet.
    iterating_over_values("books.xlsx", sheet_name="Sheet 1 - Books")
| StarcoderdataPython |
9748050 | <reponame>ramnes/sftpgo-client
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..types import UNSET, Unset
T = TypeVar("T", bound="FTPPassivePortRange")
@attr.s(auto_attribs=True)
class FTPPassivePortRange:
    """TCP port range used for FTP passive-mode data connections.

    ``start``/``end`` default to UNSET and are omitted from the serialized
    dict when unset; unrecognized JSON keys are preserved round-trip in
    ``additional_properties``.
    """

    # Both bounds are optional; UNSET values are dropped by to_dict().
    start: Union[Unset, int] = UNSET
    end: Union[Unset, int] = UNSET
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        # Serialize to a plain dict, emitting known fields only when set and
        # letting them take precedence over same-named additional properties.
        start = self.start
        end = self.end

        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if start is not UNSET:
            field_dict["start"] = start
        if end is not UNSET:
            field_dict["end"] = end

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        # Deserialize from a plain dict; any keys beyond start/end are kept
        # in additional_properties.
        d = src_dict.copy()
        start = d.pop("start", UNSET)

        end = d.pop("end", UNSET)

        ftp_passive_port_range = cls(
            start=start,
            end=end,
        )

        ftp_passive_port_range.additional_properties = d
        return ftp_passive_port_range

    @property
    def additional_keys(self) -> List[str]:
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| StarcoderdataPython |
3501305 | <reponame>songanz/RL_for_AV
import importlib
import matplotlib as mpl
import matplotlib.cm as cm
import numpy as np
class ValueIterationGraphics(object):
    """
    Graphical visualization of the Value Iteration value function.
    """
    BLACK = (0, 0, 0)
    RED = (255, 0, 0)
    # Lazily-imported highway_env.envs module; stays None if unavailable.
    highway_module = None

    @classmethod
    def display(cls, agent, surface):
        """
        Display the computed value function of an agent.

        :param agent: the agent to be displayed
        :param surface: the surface on which the agent is displayed.
        """
        if not cls.highway_module:
            # Import highway_env lazily so this class works (as a no-op)
            # when the package is not installed.
            try:
                cls.highway_module = importlib.import_module("highway_env.envs")
            except ModuleNotFoundError:
                pass

        if cls.highway_module and isinstance(agent.env, cls.highway_module.abstract.AbstractEnv):
            cls.display_highway(agent, surface)

    @classmethod
    def display_highway(cls, agent, surface):
        """
        Particular visualization of the state space that is used for highway_env environments only.

        :param agent: the agent to be displayed
        :param surface: the surface on which the agent is displayed.
        """
        import pygame
        # Map state values in roughly [-2, 2] to the reversed jet colormap.
        norm = mpl.colors.Normalize(vmin=-2, vmax=2)
        cmap = cm.jet_r
        # Grid shape is (velocities, lanes, time steps) -- TODO confirm
        # against highway_env's TTC-grid layout.
        try:
            grid_shape = agent.mdp.original_shape
        except AttributeError:
            grid_shape = cls.highway_module.finite_mdp.compute_ttc_grid(agent.env, time_quantization=1., horizon=10.).shape
        cell_size = (surface.get_width() // grid_shape[2], surface.get_height() // (grid_shape[0] * grid_shape[1]))
        velocity_size = surface.get_height() // grid_shape[0]
        value = agent.state_value().reshape(grid_shape)
        # One colored cell per state; a horizontal line separates velocities.
        for h in range(grid_shape[0]):
            for i in range(grid_shape[1]):
                for j in range(grid_shape[2]):
                    color = cmap(norm(value[h, i, j]), bytes=True)
                    pygame.draw.rect(surface, color, (
                        j * cell_size[0], i * cell_size[1] + h * velocity_size, cell_size[0], cell_size[1]), 0)
            pygame.draw.line(surface, cls.BLACK,
                             (0, h * velocity_size), (grid_shape[2] * cell_size[0], h * velocity_size), 1)
        # Outline the planned trajectory's states in red.
        states, actions = agent.plan_trajectory(agent.mdp.state)
        for state in states:
            (h, i, j) = np.unravel_index(state, grid_shape)
            pygame.draw.rect(surface, cls.RED,
                             (j * cell_size[0], i * cell_size[1] + h * velocity_size, cell_size[0], cell_size[1]), 1)
| StarcoderdataPython |
4813166 | #!/usr/bin/env python 3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 PanXu, Inc. All Rights Reserved
#
"""
测试 gaz vocabulary collate
Authors: PanXu
Date: 2021/02/02 10:11:00
"""
import os
import logging
from easytext.data.pretrained_word_embedding_loader import GeneralPretrainedWordEmbeddingLoader
from easytext.utils.json_util import json2str
from ner.data.lattice import GazVocabularyCollate
from ner.data.lattice import Gazetteer
from ner import ROOT_PATH
from ner.tests import ASSERT
def test_gaz_vocabulary_collate(lattice_ner_demo_dataset, gaz_pretrained_embedding_loader):
    """Check that GazVocabularyCollate extracts the expected lexicon words per sentence."""
    lexicon = Gazetteer(gaz_pretrained_word_embedding_loader=gaz_pretrained_embedding_loader)
    collate_fn = GazVocabularyCollate(gazetteer=lexicon)

    words_list = collate_fn(lattice_ner_demo_dataset)
    logging.info(json2str(words_list))

    expected_words_per_sentence = [
        # Sentence: "陈元呼吁加强国际合作推动世界经济发展"
        ["陈元", "呼吁", "吁加", "加强", "强国", "国际", "合作", "推动", "世界", "经济", "发展"],
        # Sentence: "新华社华盛顿4月28日电(记者翟景升)"
        ["新华社", "新华", "华社", "社华", "华盛顿", "华盛", "盛顿", "记者", "翟景升", "翟景", "景升"],
    ]
    for expected_words, actual_words in zip(expected_words_per_sentence, words_list):
        ASSERT.assertListEqual(expected_words, actual_words)
| StarcoderdataPython |
3202310 | """
@brief PyTorch training code.
@author <NAME> (<EMAIL>)
@date July 2021
"""
import argparse
from decimal import Decimal
import json
from tqdm import tqdm
import numpy as np
import torch
from torch.optim import Adam
import torchnet as tnt
from src.network_architectures.custom_3dunet.unet import UNet3D
from src.engine.engine import Engine
from src.dataset.dataset import Fetal3DSegDataset, Fetal3DSegPipeline
from src.sampler.weighted_sampler import WeightedSampler
from src.sampler.batch_weighted_sampler import BatchWeightedSampler
from src.utils.definitions import *
from src.engine.dali_iterator import PyTorchIterator
# You need to install my python package for the label-set loss function
# https://github.com/LucasFidon/label-set-loss-functions
from label_set_loss_functions.loss import LeafDiceLoss, MarginalizedDiceLoss, MeanDiceLoss
# Names accepted by get_loss / get_network / create_optimizer below.
SUPPORTED_LOSS = ['mean_dice', 'leaf_dice', 'marginalized_dice']
SUPPORTED_MODEL = ['unet']
SUPPORTED_OPTIMIZER = ['adam']

# Comment this for deterministic behaviour or when using variable patch size
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True

# NOTE(review): main() also reads opt.epoch_step, opt.lr_decay_ratio and
# opt.dro, but no corresponding add_argument calls appear below — these will
# raise AttributeError at runtime unless registered elsewhere; confirm.
parser = argparse.ArgumentParser(
    description='Segmentation training script')
# Model options
parser.add_argument('--model', default='unet', type=str)
parser.add_argument('--norm', default='instance', type=str, help='instance or batch')
parser.add_argument('--num_classes', default=NUM_CLASS, type=int)
parser.add_argument('--save', default='./logs/test_fetal3d_seg', type=str,
                    help='save parameters and logs in this folder')
parser.add_argument('--resume', default='', type=str,
                    help='(optional) resume training from the checkpoint indicated; ex: ./logs/model_iter12000.pt7')
parser.add_argument('--dtype', default='float', type=str)
parser.add_argument('--nthread', default=4, type=int)
# Training options
parser.add_argument('--loss', default='mean_dice', type=str,
                    help='Available options are %s' % SUPPORTED_LOSS)
parser.add_argument('--epochs', default=500, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--save_every_n_iter', default=100, type=int)  # 100
parser.add_argument('--weight_decay', default=0., type=float)
# Misc optimization options
parser.add_argument('--optim', default='adam')
parser.add_argument('--batch_size', default=3, type=int)
parser.add_argument('--patch_size', default='[144,160,144]', type=str)
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--momentum', default=0.9, type=float,
                    help='Momentum value to use for the optimizer.')
# Options for data augmentation
parser.add_argument('--no_flip', action='store_true',
                    help='Do not use right-left flip augmentation during training with proba 0.5')
parser.add_argument('--flip_all', action='store_true',
                    help='Use flip augmentation (all axis) during training with proba 0.5')
parser.add_argument('--no_data_augmentation', action='store_true')
parser.add_argument('--gaussian_blur', action='store_true')
parser.add_argument('--no_zoom', action='store_true')
# Options to save GPU memory
parser.add_argument('--grad_check', action='store_true',
                    help="(optional; recommended) It activates gradient checkpointing."
                         " Cannot be combined with --fp16")
parser.add_argument('--fp16', action='store_true',
                    help='(optional) It activates mixed precision. Cannot be combined with --grad_check.')
# Data
parser.add_argument('--data_csv',
                    default=os.path.join(
                        '/data',
                        'fetal_brain_srr_parcellation/training.csv'
                    ),
                    type=str,
                    help='path to a csv file that maps sample id '
                         'to image path and segmentation path')
def get_network(model, num_channels, num_classes, norm='instance', grad_check=False):
    """
    Build the segmentation network, move it to the GPU and set it to train mode.

    :param model: architecture name; must be one of SUPPORTED_MODEL.
    :param num_channels: number of input channels.
    :param num_classes: number of output classes.
    :param norm: normalization layer type ('instance' or 'batch').
    :param grad_check: whether to enable gradient checkpointing.
    :return: neural network model (on GPU, in training mode).
    """
    assert model in SUPPORTED_MODEL, 'Model %s not supported' % model
    print('A 3D U-Net is used')
    # Architecture hyper-parameters gathered in one place for readability.
    unet_config = dict(
        in_channels=num_channels,
        out_classes=num_classes,
        out_channels_first_layer=30,
        num_encoding_blocks=5,
        residual=False,
        normalization=norm,
        padding=True,
        activation='LeakyReLU',
        upsampling_type='trilinear',
        gradient_checkpointing=grad_check,
    )
    net = UNet3D(**unet_config)
    # Put the model on the gpu and switch to training mode.
    net.cuda()
    net.train()
    return net
def get_loss(loss_name='mean_dice'):
    """
    Return the loss function to train with.

    :param loss_name: one of SUPPORTED_LOSS ('mean_dice', 'leaf_dice',
        'marginalized_dice'); 'mean_dice_partial' is kept as a
        backward-compatible alias for 'marginalized_dice'.
    :return: a callable loss module.
    :raises NotImplementedError: for any other name.
    """
    if loss_name == 'mean_dice':
        print('Use the mean Dice loss (denominator squared)')
        loss = MeanDiceLoss(squared=True)
        return loss
    # BUG FIX: SUPPORTED_LOSS advertises 'marginalized_dice' but the original
    # implementation only matched 'mean_dice_partial', so the advertised name
    # raised NotImplementedError. Both names now map to the marginalized loss.
    elif loss_name in ('marginalized_dice', 'mean_dice_partial'):
        print('Use the marginalized Dice loss')
        print('Labels supersets:')
        print(LABELSET_MAP)
        loss = MarginalizedDiceLoss(LABELSET_MAP)
        return loss
    elif loss_name == 'leaf_dice':
        print('Use the mean Dice loss with soft labels ground-truth (denominator squared)')
        print('Labels supersets:')
        print(LABELSET_MAP)
        loss = LeafDiceLoss(LABELSET_MAP)
        return loss
    else:
        raise NotImplementedError('Loss function %s not defined' % loss_name)
def main(opt):
    """
    Train a network for segmentation.

    Builds the DALI data loader, the 3D U-Net, the loss and the optimizer,
    optionally restores a checkpoint, and runs the training engine with
    periodic checkpoint/log saving.

    :param opt: parsed command line arguments.
    """
    def create_iterator(opt):
        """
        Return a PyTorch data loader.
        This is a pipeline that includes:
        - data loading
        - random indices sampling
        - data normalization / augmentation
        :return: PyTorch data loader
        """
        # Create the dataset (data reader)
        dataset = Fetal3DSegDataset(
            data_csv=opt.data_csv,
            use_data_augmentation=not opt.no_data_augmentation,
            use_zoom=not opt.no_zoom,
        )
        # Create the index batch sampler
        idx_sampler = WeightedSampler(
            num_samples=len(dataset),
            weights=1,
        )
        batch_idx_sampler = BatchWeightedSampler(
            sampler=idx_sampler,
            batch_size=opt.batch_size,
            drop_last=False,
        )
        # Create the data normalization/augmentation pipeline
        dali_pipeline = Fetal3DSegPipeline(
            dataset,
            batch_index_sampler=batch_idx_sampler,
            patch_size=json.loads(opt.patch_size),
            num_threads=opt.nthread,
            do_flip=not opt.no_flip,
            do_flip_all=opt.flip_all,
            do_gauss_blur=opt.gaussian_blur,
            do_zoom=False,  # todo
        )
        # Create the DALI PyTorch dataloader
        data_loader = PyTorchIterator(
            pipelines=dali_pipeline,
            size=len(dataset),
            output_map=['img', 'seg', 'idx'],
            # if True the last batch is completed to have a length equal to batch_size.
            # However, DALI is not using the batch sampler to select the indices
            # used to fill the last batch...
            fill_last_batch=True,
            # if False samples used to complete the previous last batch
            # are removes from the next epoch.
            last_batch_padded=True,
            auto_reset=True,
        )
        return data_loader

    def create_optimizer(opt, lr):
        """Create the optimizer for `network` (closure) with learning rate `lr`."""
        # Create the optimizer
        if opt.optim == 'adam':
            print('Create Adam optimizer with lr=%f, momentum=%f' % (lr, opt.momentum))
            optim = Adam(network.parameters(),
                         lr,
                         betas=(opt.momentum, 0.999),
                         weight_decay=opt.weight_decay,
                         amsgrad=False
                         )
        else:
            # NOTE(review): this *returns* a ValueError instead of raising it,
            # so an unsupported optimizer name silently hands an exception
            # object back to the caller — should presumably be `raise`.
            return ValueError('Optimizer %s not supported' % opt.optim)
        return optim

    def infer_and_eval(sample):
        """
        Forward one batch through `network` and compute the loss.

        :param sample: couple of tensors; input batch and
        corresponding ground-truth segmentations.
        :return: float, 1d tensor; mean loss for the input batch
        and None or batch of predicted segmentations.
        """
        # DALI data loading pipeline is used
        inputs = sample[0]['img']
        targets = torch.squeeze(sample[0]['seg'], dim=1)
        y = network(inputs)
        loss = loss_func(y, targets)  # float; mean batch loss
        # Free the prediction tensor early to reduce GPU memory pressure.
        del y
        return loss

    def log(t, state):
        """
        Save the network parameters, the weights, and the logs.

        Returns None (the `print(log(...))` call in save_model_and_logs
        therefore prints `None` after the stats line).

        :param t: dict; Contains info about current hyperparameters value.
        :param state: dict; Contains info about current hyperparameters value.
        """
        torch.save(
            dict(params=network.state_dict(),
                 epoch=t['epoch'],
                 iter=t['iter'],
                 optimizer=state['optimizer'].state_dict()),
            os.path.join(opt.save, 'model_iter%d.pt7' % t['iter'])
        )
        z = {**vars(opt), **t}
        # Write the logs for epoch t.
        with open(os.path.join(opt.save, 'log.txt'), 'a') as flog:
            flog.write('json_stats: ' + json.dumps(z) + '\n')
        print(z)

    def on_sample(state):
        """
        Called after a batch is drawn from the training dataset.
        :param state: dict.
        """
        pass

    def on_forward(state):
        """
        Called at the beginning of each forward pass.
        :param state: dict.
        """
        loss = float(state['loss'])
        # Update running average and std for the loss value.
        meter_loss.add(loss)
        if state['train']:
            state['iterator'].set_postfix(loss=loss)

    def on_start(state):
        """
        Called only once, at the beginning of the training.
        Seeds the engine state with the (possibly checkpoint-restored)
        epoch and iteration counters from the enclosing scope.
        :param state: dict.
        """
        state['epoch'] = epoch
        state['t'] = iter

    def on_start_epoch(state):
        """
        Called at the beginning of each epoch.
        :param state: dict.
        """
        meter_loss.reset()
        timer_train.reset()
        epoch = state['epoch'] + 1
        # NOTE(review): opt.lr_decay_ratio is read here but is not registered
        # on the argument parser above — confirm where it is defined.
        if epoch in epoch_step and opt.lr_decay_ratio != 1:
            # learning rate decay
            lr = state['optimizer'].param_groups[0]['lr']
            state['optimizer'] = create_optimizer(opt, lr * opt.lr_decay_ratio)
        # reinitialize the progress bar for the next epoch
        state['iterator'] = tqdm(train_loader, dynamic_ncols=True)

    def save_model_and_logs(state):
        """
        Called at the end of each epoch.
        Aggregate logs at the end of each epoch, and print them.
        :param state: dict.
        """
        train_loss = meter_loss.value()  # mean and std
        train_time = timer_train.value()
        meter_loss.reset()
        timer_test.reset()
        logs_dict = {
            "train_loss": train_loss[0],
            "train_loss_std": train_loss[1],
            "epoch": state['epoch'],
            "iter": state['t'],
            "num_classes": num_classes,
            "n_parameters": n_parameters,
            "train_time": train_time,
        }
        print(log(logs_dict, state))
        print('==> id: %s, epoch (%d/%d), iter %d, training loss: \33[91m%.3E\033[0m' %
              (opt.save, state['epoch'], opt.epochs, state['t'], Decimal(train_loss[0])))

    # NOTE(review): opt.epoch_step is not registered on the argument parser
    # above — this line raises AttributeError unless the flag is added.
    epoch_step = json.loads(opt.epoch_step)
    num_classes = opt.num_classes
    num_channels = 1

    # Create the dataloader.
    # It loads the data, pre-process them, and give them to the network.
    train_loader = create_iterator(opt)

    # Create the network that will be trained.
    network = get_network(
        opt.model, num_channels, num_classes, norm=opt.norm, grad_check=opt.grad_check)
    # print(network)

    # Create the loss function to use for training the network.
    loss_func = get_loss(opt.loss)
    optimizer = create_optimizer(opt, opt.lr)

    # NOTE: `iter` shadows the builtin of the same name within main().
    epoch = 1
    iter = 1

    # (optional) Restore the parameters of the network to resume training
    # from a previous session.
    if opt.resume != '':
        state_dict = torch.load(opt.resume)
        epoch = state_dict['epoch']
        iter = state_dict['iter']
        print("Resume training from iter %d" % iter)
        # Restore the model parameters
        network.load_state_dict(state_dict['params'])
        optimizer.load_state_dict(state_dict['optimizer'])
        # NOTE(review): opt.dro is not registered on the argument parser
        # above — confirm this flag still exists.
        if opt.dro:
            # Restore the weights of the sampler (if applicable)
            weights_path = os.path.join(os.path.dirname(opt.resume),
                                        'weights_iter%d.pt7' % iter)
            train_loader.batch_sampler.sampler.load_weights(weights_path)

    # Print the number of parameters
    trainable_model_parameters = filter(
        lambda p: p.requires_grad, network.parameters())
    n_parameters = int(sum([np.prod(p.size()) for p in trainable_model_parameters]))
    print('\nTotal number of parameters:', n_parameters)

    # Maintain average and std of the loss for logs.
    meter_loss = tnt.meter.AverageValueMeter()
    timer_train = tnt.meter.TimeMeter('s')
    timer_test = tnt.meter.TimeMeter('s')

    if not os.path.exists(opt.save):
        os.mkdir(opt.save)

    # Wire the training callbacks into the engine.
    engine = Engine()
    engine.hooks['on_sample'] = on_sample
    engine.hooks['on_forward'] = on_forward
    engine.hooks['on_start_epoch'] = on_start_epoch
    # engine.hooks['on_end_epoch'] = on_end_epoch
    engine.hooks['on_every_n_iter'] = save_model_and_logs
    engine.hooks['on_start'] = on_start

    # Train the network.
    engine.train(infer_and_eval, train_loader, opt.epochs, optimizer,
                 every_n_iter=opt.save_every_n_iter, fp16=opt.fp16)
if __name__ == '__main__':
    # Script entry point: parse CLI options, make sure the output folders
    # exist, and launch training.
    opt = parser.parse_args()
    print('parsed options:', vars(opt))
    # os.makedirs(..., exist_ok=True) replaces the original
    # `if not os.path.exists(...): os.mkdir(...)` pattern: it is free of the
    # check-then-create race and also creates missing parent directories
    # (os.mkdir fails when opt.save is a nested path with absent parents).
    os.makedirs('logs', exist_ok=True)
    os.makedirs(opt.save, exist_ok=True)
    main(opt)
| StarcoderdataPython |
# MIT License
# Copyright (c) 2020 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
"""
Utility functions for training one epoch
and evaluating one epoch
"""
import torch
import random as rd
import torch.nn as nn
import math
from .metrics import accuracy_MNIST_CIFAR as accuracy
def train_epoch(model, optimizer, device, data_loader, epoch, augmentation, flip, distortion):
    """
    Train `model` for one epoch over `data_loader`.

    :param model: graph network exposing .forward(...) and .loss(...).
    :param optimizer: torch optimizer, stepped once per batch.
    :param device: device the batch tensors are moved to.
    :param data_loader: yields (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e).
    :param epoch: current epoch index (unused here; kept for interface parity).
    :param augmentation: max rotation angle in degrees applied to eigenvector
        coordinates 1 and 2 (0 disables).
    :param flip: if truthy, randomly flip the sign of eigenvector coordinate 2.
    :param distortion: max relative random distortion of eigenvector
        coordinates 1 and 2 (0 disables).
    :return: (mean loss per batch, accuracy per sample, optimizer).
    """
    model.train()
    epoch_loss = 0
    epoch_train_acc = 0
    nb_data = 0
    num_batches = 0  # replaces reading the leaked loop variable after the loop
    for batch_idx, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
        batch_x = batch_graphs.ndata['feat'].to(device)
        batch_e = batch_graphs.edata['feat'].to(device)
        batch_snorm_e = batch_snorm_e.to(device)
        batch_labels = batch_labels.to(device)
        batch_snorm_n = batch_snorm_n.to(device)
        eig_backup = None  # pre-modification copy of ndata['eig'], used to restore below
        if augmentation > 1e-7:
            # Random 2D rotation of the (col 1, col 2) eigenvector coordinates
            # by up to +/- `augmentation` degrees.
            eig_backup = batch_graphs.ndata['eig'].clone()
            angle = (torch.rand(batch_x[:, 0].shape) - 0.5) * 2 * augmentation
            sine = torch.sin(angle * math.pi / 180)
            batch_graphs.ndata['eig'][:, 1] = torch.mul((1 - sine**2)**(0.5), eig_backup[:, 1]) \
                + torch.mul(sine, eig_backup[:, 2])
            batch_graphs.ndata['eig'][:, 2] = torch.mul((1 - sine**2) ** (0.5), eig_backup[:, 2]) \
                - torch.mul(sine, eig_backup[:, 1])
        if flip:
            # Random sign flip of eigenvector coordinate 2 (eigenvectors are
            # sign-ambiguous). BUG FIX: the original reused the name
            # `batch_graphs_eig` here, clobbering the augmentation backup and
            # corrupting the restore step when flip was combined with
            # augmentation/distortion; a distinct local avoids that.
            eig_col2 = batch_graphs.ndata['eig'][:, 2].to(device)
            sign_flip = torch.rand(eig_col2.size()).to(device)
            sign_flip[sign_flip >= 0.5] = 1.0; sign_flip[sign_flip < 0.5] = -1.0
            batch_graphs.ndata['eig'][:, 2] = torch.mul(sign_flip, eig_col2)
        if distortion > 1e-7:
            # Random relative distortion of eigenvector coordinates 1 and 2.
            # Mirrors original behavior: when combined with augmentation this
            # clone (and the restore below) contains the rotated values.
            eig_backup = batch_graphs.ndata['eig'].clone()
            dist = (torch.rand(batch_x[:, 0].shape) - 0.5) * 2 * distortion
            batch_graphs.ndata['eig'][:, 1] = torch.mul(dist, torch.mean(torch.abs(eig_backup[:, 1]), dim=-1, keepdim=True)) + eig_backup[:, 1]
            # BUG FIX: the original added the full (N, k) `eig` tensor here
            # instead of its column 2, breaking the shape of the update;
            # column 2 mirrors the column-1 line above.
            batch_graphs.ndata['eig'][:, 2] = torch.mul(dist, torch.mean(torch.abs(eig_backup[:, 2]), dim=-1, keepdim=True)) + eig_backup[:, 2]
        optimizer.zero_grad()
        batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
        loss = model.loss(batch_scores, batch_labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.detach().item()
        epoch_train_acc += accuracy(batch_scores, batch_labels)
        nb_data += batch_labels.size(0)
        num_batches = batch_idx + 1
        if augmentation > 1e-7 or distortion > 1e-7:
            # Restore the pre-augmentation eigenvectors so the (cached) graphs
            # are not permanently modified across epochs.
            batch_graphs.ndata['eig'] = eig_backup.detach()
    epoch_loss /= num_batches
    epoch_train_acc /= nb_data
    return epoch_loss, epoch_train_acc, optimizer
def evaluate_network(model, device, data_loader, epoch):
    """
    Evaluate `model` over one full pass of `data_loader` (no gradients).

    :param model: graph network exposing .forward(...) and .loss(...).
    :param device: device the batch tensors are moved to.
    :param data_loader: yields (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e).
    :param epoch: current epoch index (unused here; kept for interface parity).
    :return: (mean loss per batch, accuracy per sample).
    """
    model.eval()
    epoch_test_loss = 0
    epoch_test_acc = 0
    nb_data = 0
    # Idiom fix: the original named its loop variable `iter` (shadowing the
    # builtin) and read the leaked value after the loop; an explicit batch
    # counter is equivalent and clearer.
    num_batches = 0
    with torch.no_grad():
        for batch_idx, (batch_graphs, batch_labels, batch_snorm_n, batch_snorm_e) in enumerate(data_loader):
            batch_x = batch_graphs.ndata['feat'].to(device)
            batch_e = batch_graphs.edata['feat'].to(device)
            batch_snorm_e = batch_snorm_e.to(device)
            batch_labels = batch_labels.to(device)
            batch_snorm_n = batch_snorm_n.to(device)
            batch_scores = model.forward(batch_graphs, batch_x, batch_e, batch_snorm_n, batch_snorm_e)
            loss = model.loss(batch_scores, batch_labels)
            epoch_test_loss += loss.detach().item()
            epoch_test_acc += accuracy(batch_scores, batch_labels)
            nb_data += batch_labels.size(0)
            num_batches = batch_idx + 1
        epoch_test_loss /= num_batches
        epoch_test_acc /= nb_data
    return epoch_test_loss, epoch_test_acc
import calendar
import datetime
import os
import shutil
import subprocess
import time
import webbrowser
import csv
from functools import wraps
from string import digits
import arcpy
from sqlalchemy import exc, func, distinct, extract
import amaptor
import geodatabase_tempfile
import launchR
from arcproject.scripts import chl_decision_tree
from arcproject.scripts import config
from arcproject.scripts import linear_ref
from arcproject.scripts import mapping
from arcproject.scripts import swap_site_recs
from arcproject.scripts import wq_gain
from arcproject.scripts import wqt_timestamp_match
from arcproject.scripts.mapping import generate_layer_for_month, WQMappingBase
from arcproject.waterquality import classes
def parameters_as_dict(f):
    """
    Decorator for tool ``execute(self, parameters, messages)`` methods.

    Converts the positional list of arcpy parameter objects into a dict keyed
    by each parameter's ``name`` attribute, so the wrapped method can look
    parameters up by name instead of by index.

    BUG FIX: the original forwarded ``messages=args[1]`` — i.e. it passed the
    raw parameter *list* as the messages object. The real messages argument is
    ``args[2]`` (``execute`` is always invoked as ``(self, parameters,
    messages)``).
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # args = (self, parameters, messages)
        parameters = {param.name: param for param in args[1]}
        return f(self=args[0], parameters=parameters, messages=args[2])
    return wrapper
class Toolbox(object):
    """ArcGIS Python Toolbox (.pyt) entry point exposing the ArcProject WQ tools."""

    def __init__(self):
        """Define the toolbox (the name of the toolbox is the name of the .pyt file)."""
        self.label = "ArcProject WQ Toolbox"
        self.alias = "ArcProject WQ Toolbox"

        # List of tool classes associated with this toolbox.
        # NOTE(review): several of these (ModifyWQSite, GenerateHeatPlot,
        # GenerateSite, ModifySelectedSite, DeleteMonth, LinearRef,
        # RenameGrabs, RegressionPlot, CorrectChl, ExportHeatPlotData) are
        # presumably defined later in this file — confirm.
        self.tools = [AddSite, AddGainSite, JoinTimestamp,
                      GenerateWQLayer, GainToDB, GenerateMonth, ModifyWQSite, GenerateHeatPlot,
                      GenerateSite, ModifySelectedSite, GenerateMap, DeleteMonth, LinearRef, RenameGrabs,
                      RegressionPlot, CorrectChl, ExportHeatPlotData]
class AddSite(object):
    """Toolbox tool: register a new water quality transect site in the database."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "New Transect Site"
        self.description = "Add a new water quality transect site to the database. Each slough should have it's own unique site id."
        self.canRunInBackground = False
        self.category = "Create New Sites"

    def getParameterInfo(self):
        """Declare the two string inputs: human-readable site name and unique site code."""
        shared = dict(datatype="GPString", multiValue=False, direction="Input")
        name_param = arcpy.Parameter(displayName="Site Name", name="site_name", **shared)
        code_param = arcpy.Parameter(displayName="Site Code", name="site_code", **shared)
        return [name_param, code_param]

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return

    def execute(self, parameters, messages):
        """Insert the new site row; report (instead of failing) when the code already exists."""
        session = classes.get_new_session()
        try:
            new_site = classes.Site()
            new_site.code = parameters[1].valueAsText.upper()  # codes stored upper-case
            new_site.name = parameters[0].valueAsText
            session.add(new_site)
            session.commit()
        except exc.IntegrityError:
            # Unique constraint on the site code.
            arcpy.AddMessage("{} already exists. Site IDs must be unique.".format(new_site.code))
        finally:
            session.close()
class AddGainSite(object):
    """Toolbox tool: register a new vertical profile (gain) site in the database."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "New Profile Site"
        self.description = "Create a new vertical profile site"
        self.canRunInBackground = False
        self.category = "Create New Sites"

    def getParameterInfo(self):
        """Declare inputs: required profile abbreviation, optional associated transect."""
        abbr = arcpy.Parameter(
            displayName="Profile Abbreviation",
            name="abbr",
            datatype="GPString",
            multiValue=False,
            direction="Input"
        )
        slough = arcpy.Parameter(
            displayName="Transect?",
            name="slough",
            datatype="GPString",
            multiValue=False,
            direction="Input",
            parameterType="Optional",
        )
        params = [abbr, slough]
        return params

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        # fill in the sloughs with options already in the database
        # (only once the abbreviation field has a value, to avoid a DB
        # round-trip on every validation pass).
        if parameters[0].valueAsText:
            session = classes.get_new_session()
            try:
                q = session.query(classes.Site.code).distinct().all()
                sites = []
                # add profile name to site list
                for site in q:
                    print(site[0])
                    sites.append(site[0])
                # Restrict the optional transect picker to known site codes.
                parameters[1].filter.type = 'ValueList'
                parameters[1].filter.list = sites
            finally:
                session.close()
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return

    @parameters_as_dict
    def execute(self, parameters, messages):
        """Insert the profile site; `parameters` is a name-keyed dict (see decorator)."""
        abbr = parameters["abbr"].valueAsText
        slough = parameters["slough"].valueAsText

        ps = classes.ProfileSite()
        ps.abbreviation = abbr.upper()

        if slough is not None:
            ps.slough = slough.upper()

        # add to db
        session = classes.get_new_session()
        try:
            session.add(ps)
            session.commit()
        except exc.IntegrityError as e:
            # Unique constraint on the abbreviation.
            arcpy.AddMessage("{} already exists. Skipping.".format(ps.abbreviation))
        finally:
            session.close()
class JoinTimestamp(object):
    """Toolbox tool: join transect water quality CSVs to GPS points by timestamp."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "Add Transects"
        self.description = "Join water quality transect to gps using time stamp and add to database"
        self.canRunInBackground = False
        self.category = "Add WQ Data"

    def getParameterInfo(self):
        """Define parameter definitions"""
        # parameter info for selecting multiple csv water quality files
        wqt = arcpy.Parameter(
            displayName="Transect Water Quality Data",
            name="wqt",
            datatype="DEFile",
            multiValue=True,
            direction="Input"
        )
        # shapefile for the transects GPS breadcrumbs
        bc = arcpy.Parameter(
            displayName="Transect Shapefile",
            name="shp_file",
            datatype="DEFeatureClass",
            direction="Input"
        )
        # optional explicit site code; when omitted, the site is parsed from
        # each filename (see execute()).
        site = arcpy.Parameter(
            displayName="Site Code (Leave blank to detect from filename)",
            name="site_code",
            datatype="GPString",
            direction="Input",
            parameterType="Optional",
        )
        out = arcpy.Parameter(
            displayName="Joined Output",
            name="Output",
            datatype="DEFeatureClass",
            direction="Output",
            parameterType="Optional"
        )
        params = [wqt, bc, site, out]
        return params

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        # Multi-value file parameters arrive as a ';'-separated string.
        wq_transect_list = parameters[0].valueAsText.split(";")

        pts = parameters[1].valueAsText

        site_code = parameters[2].valueAsText
        if not site_code or site_code == "":
            # No explicit code: fall back to parsing the site from filenames.
            site_function = wqt_timestamp_match.site_function_historic
        else:
            site_function = site_code

        output_path = parameters[3].valueAsText
        if output_path == "":
            output_path = None

        # run wq_join_match
        wqt_timestamp_match.main(wq_transect_list, pts, output_feature=output_path, site_function=site_function)

        pass
class GainToDB(object):
    """Toolbox tool: import vertical profile (gain) files into the database."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "Add Gain Profiles"
        self.description = ""
        self.canRunInBackground = False
        self.category = "Add WQ Data"

    def getParameterInfo(self):
        """Define parameter definitions"""
        # parameter info for selecting multiple csv water quality files.
        # Value table: one row per file with its site id and gain setting.
        wqp = arcpy.Parameter(
            displayName="Vertical Profile File",
            name="wqp_files",
            parameterType="GPValueTable",
            multiValue=True,
            direction="Input"
        )
        wqp.columns = [['DEFile', 'Filename'], ['GPString', 'Site ID'], ['GPString', 'Gain Type']]
        # TODO get list of gain settings from the data base?
        wqp.filters[2].type = 'ValueList'
        wqp.filters[2].list = ['0', '1', '10', '100']

        # Checkbox: when ticked, updateParameters() fills the table columns by
        # parsing each filename, then unticks itself.
        bool = arcpy.Parameter(
            displayName="Fill in table by parsing filename?",
            name="bool",
            datatype="GPBoolean",
            parameterType="Optional"
        )

        # site_part = arcpy.Parameter(
        # 	displayName="Part of filename with site code (split by underscores)?",
        # 	name="site",
        # 	datatype="GPLong",
        # 	parameterType="Optional"
        # )
        #
        # site_part.value = 3
        # site_part.filter.type = "ValueList"
        # site_part.filter.list = [1, 2, 3, 4, 5, 6]
        #
        #
        # gain_part = arcpy.Parameter(
        # 	displayName="Part of filename with gain code (split by underscores)?",
        # 	name="gain",
        # 	datatype="GPLong",
        # 	parameterType="Optional"
        # )
        #
        # gain_part.value = 5
        # gain_part.filter.type = "ValueList"
        # gain_part.filter.list = [1, 2, 3, 4, 5, 6]

        params = [wqp, bool,]

        return params

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        # Populate the Site ID picker once (only when the filter list is
        # still empty) with the profile sites known to the database.
        if parameters[0].valueAsText and len(parameters[0].filters[1].list) == 0:
            # validate site name by pulling creating filter with names from profile_sites table
            # get list of sites from the database profile sites table
            session = classes.get_new_session()
            try:
                profiles = session.query(classes.ProfileSite.abbreviation).distinct().all()
                # print(profiles)  # [(u'TES1',), (u'TES2',), (u'TS1',)]
                profile_names = []
                # add profile name to site list
                for profile in profiles:
                    print(profile[0])
                    profile_names.append(profile[0])
                parameters[0].filters[1].type = 'ValueList'
                parameters[0].filters[1].list = profile_names
            finally:
                session.close()

        # updates the value table using the values parsed from the file name
        if parameters[1].value:
            vt = parameters[0].values  # values are list of lists
            for i in range(0, len(vt)):
                filename = vt[i][0]
                basename = os.path.basename(str(filename))
                base = os.path.splitext(basename)[0]  # rm extension if there is one
                parts = base.split("_")  # split on underscore
                # site = parts[int(parameters[2].value)-1]
                # Which underscore-delimited part holds the site/gain is
                # configured in wqt_timestamp_match.site_function_params.
                site = parts[wqt_timestamp_match.site_function_params.get('site_part')]
                # gain = parts[int(parameters[3].value)-1]
                gain = parts[wqt_timestamp_match.site_function_params.get('gain_part')]
                vt[i][0] = str(filename)
                vt[i][1] = site
                # strip all letters from gain setting ("GN10" -> 10)
                digits_only = ''.join(c for c in gain if c in digits)
                gain = int(digits_only)
                vt[i][2] = gain
            parameters[0].values = vt
            # set checkbox to false
            parameters[1].value = False
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        # get the parameters
        vt = parameters[0].values  # values are list of lists
        for i in range(0, len(vt)):
            wq_gain_file = str(vt[i][0])
            basename = os.path.basename(str(wq_gain_file))
            vt[i][0] = str(wq_gain_file)
            site_id = vt[i][1]  # site
            gain_setting = vt[i][2]  # gain
            arcpy.AddMessage("{} {} {}".format(basename, site_id, gain_setting))
            try:
                wq_gain.main(wq_gain_file, site_id, gain_setting)
            except exc.IntegrityError as e:
                # Duplicate import: a row for this gain file already exists.
                arcpy.AddMessage("Unable to import gain file. Record for this gain file "
                                 "already exists in the vertical_profiles table.")
        return
class GenerateWQLayer(WQMappingBase):
    """Toolbox tool: build a symbolized map layer of one day's WQ transect data."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "Map Layer - Single Day"
        self.description = ""
        self.canRunInBackground = False
        self.category = "Mapping"
        # Superclass sets up shared mapping parameters (e.g. self.select_wq_param).
        super(GenerateWQLayer, self).__init__()

    def getParameterInfo(self):
        """Define parameter definitions"""
        # parameter info for selecting multiple csv water quality files
        date_to_generate = arcpy.Parameter(
            displayName="Date to Generate Layer For",
            name="date_to_generate",
            datatype="GPDate",
            multiValue=False,
            direction="Input"
        )

        # shapefile for the transects GPS breadcrumbs
        fc = arcpy.Parameter(
            displayName="Output Feature Class",
            name="output_feature_class",
            datatype="DEFeatureClass",
            direction="Output"
        )
        # Attach default symbology to the output parameter.
        fc = mapping.set_output_symbology(fc)
        params = [date_to_generate, self.select_wq_param, fc, ]
        return params

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        return

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        arcpy.env.addOutputsToMap = True
        date_to_use = parameters[0].value
        output_location = parameters[2].valueAsText
        arcpy.AddMessage("Output Location: {}".format(output_location))
        # Build the day's feature class, then add it to the map with the
        # symbology chosen in the WQ-parameter picker (inherited helper).
        mapping.layer_from_date(date_to_use, output_location)
        self.insert_layer(output_location, parameters[1])
class GenerateMonth(WQMappingBase):
    """Toolbox tool: build a symbolized map layer of a whole month's WQ transects."""

    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "Map Layer - Full Month"
        self.description = "Generate a layer of all the water quality transects for a given month and year"
        self.canRunInBackground = False
        self.category = "Mapping"
        # Superclass provides self.year_to_generate / self.month_to_generate /
        # self.select_wq_param and the month/year helper methods used below.
        super(GenerateMonth, self).__init__()

    def getParameterInfo(self):
        """Define parameter definitions"""
        # parameter info for selecting multiple csv water quality files

        # shapefile for the transects GPS breadcrumbs
        fc = arcpy.Parameter(
            displayName="Output Feature Class",
            name="output_feature_class",
            datatype="DEFeatureClass",
            direction="Output"
        )

        params = [self.year_to_generate, self.month_to_generate, self.select_wq_param, fc, ]
        return params

    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True

    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        # Restrict the month picker to months that have data for the chosen year.
        self.update_month_fields(parameters)

    def updateMessages(self, parameters):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return

    def execute(self, parameters, messages):
        """The source code of the tool."""
        try:
            arcpy.env.addOutputsToMap = True
            year_to_use, month_to_use = self.convert_year_and_month(year=parameters[0], month=parameters[1])
            arcpy.AddMessage("YEAR: {}, MONTH: {}".format(year_to_use, month_to_use))

            output_location = parameters[3].valueAsText
            generate_layer_for_month(month_to_use, year_to_use, output_location)
            self.insert_layer(output_location, parameters[2])
        finally:
            self.cleanup()  # clean up from tool setup
class GenerateMap(WQMappingBase):
    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        # call the setup first, because we want to overwrite the labels, etc
        super(GenerateMap, self).__init__()

        self.label = "Generate Map for Export"
        self.description = "Generates a map document and optional static image/PDF maps for symbolized water quality data for a month"
        self.canRunInBackground = False
        self.category = "Mapping"
    def getParameterInfo(self):
        """Define parameter definitions"""

        ### There may not be a type for this in Pro, so we should not add the parameter, and instead add a new map in the CURRENT document
        # In ArcGIS Pro the output is the name of a new map inside the current
        # project; in ArcMap it is a path to a new .mxd document.
        if amaptor.PRO:
            map_output = arcpy.Parameter(
                displayName="Name of New Map in Current Project",
                name="output_map",
                datatype="GPString",
                direction="Output"
            )
        else:  # using ArcMap
            map_output = arcpy.Parameter(
                displayName="Output ArcGIS Map Location",
                name="output_map",
                datatype="DEMapDocument",
                direction="Output"
            )

        # Optional static exports, grouped in their own parameter category.
        export_pdf = arcpy.Parameter(
            displayName="Output Path for PDF",
            name="output_pdf",
            datatype="DEFile",
            direction="Output",
            parameterType="Optional",
            category="Static Map Exports",
        )

        export_png = arcpy.Parameter(
            displayName="Output Path for PNG",
            name="output_png",
            datatype="DEFile",
            direction="Output",
            parameterType="Optional",
            category="Static Map Exports",
        )

        params = [self.year_to_generate, self.month_to_generate, self.select_wq_param, map_output, export_pdf, export_png]
        return params
    def isLicensed(self):
        """Set whether tool is licensed to execute."""
        return True
    def updateParameters(self, parameters):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        # Restrict the month picker to months that have data for the chosen year.
        self.update_month_fields(parameters)
def updateMessages(self, parameters):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
def execute(self, parameters, messages):
"""
Generates the map and exports any necessary static maps
:param parameters:
:return:
"""
try:
arcpy.env.addOutputsToMap = False
year_to_use, month_to_use = self.convert_year_and_month(year=parameters[0], month=parameters[1])
symbology_param = parameters[2]
template = mapping.arcgis_10_template
output_map_path = parameters[3].valueAsText
output_pdf_path = parameters[4].valueAsText
output_png_path = parameters[5].valueAsText
new_layout_name = "{} Layout".format(output_map_path)
if amaptor.PRO:
if "testing_project" in globals(): # this is a hook for our testing code to set a value in the module and have this use it instead of "CURRENT"
project = globals()["testing_project"]
else:
project = "CURRENT"
map_project = amaptor.Project(project)
new_map = map_project.new_map(name=output_map_path, template_map=template, template_df_name="ArcProject Map")
new_layout = map_project.new_layout(name=new_layout_name, template_layout=mapping.arcgis_pro_layout_template, template_name="base_template")
new_layout.frames[0].map = new_map # rewrite the data frame map to be the map object of the new map
output_location = geodatabase_tempfile.create_gdb_name(name_base="generated_month_layer", gdb=map_project.primary_document.defaultGeodatabase)
else:
shutil.copyfile(template, output_map_path)
map_project = amaptor.Project(output_map_path)
new_map = map_project.maps[0] # it'll be the first map, because it's the only data frame in the template
output_location = geodatabase_tempfile.create_gdb_name(name_base="generated_month_layer")
arcpy.AddMessage("Map Document set up complete. Creating new layer")
generate_layer_for_month(month_to_use, year_to_use, output_location)
self.insert_layer(output_location, symbology_param, map_or_project=new_map)
new_layer = new_map.find_layer(path=output_location)
new_layer.name = symbology_param.valueAsText
new_map.zoom_to_layer(layer=new_layer, set_layout="ALL")
new_map.replace_text("{wq_month}", "{} {}".format(parameters[1].valueAsText, parameters[0].valueAsText)) # Add the month and year to the title
map_project.save()
if output_png_path and output_png_path != "":
new_map.export_png(output_png_path, resolution=300)
if output_pdf_path and output_pdf_path != "":
new_map.export_pdf(output_pdf_path)
if amaptor.PRO:
arcpy.AddMessage("Look for a new map named \"{}\" and a new layout named \"{}\" in your Project pane".format(output_map_path, new_layout_name))
finally:
self.cleanup() # clean up from tool setup
class ModifyWQSite(object):
	"""Toolbox tool that moves every water quality record from one transect
	code to another, optionally dropping the old site row afterwards."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Swap Transect Code for ALL records"
		self.description = "Modifies the transect code for all records that currently belong to a certain site."
		self.canRunInBackground = False
		self.category = "Modify"

	def getParameterInfo(self):
		"""Three inputs: the code records currently carry, the code to assign,
		and whether to delete the old site from the sites table."""
		source_code = arcpy.Parameter(
			name="site_code",
			displayName="Current Site Code for records",
			datatype="GPString",
			direction="Input",
			multiValue=False
		)
		target_code = arcpy.Parameter(
			name="new_code",
			displayName="New Site Code to Assign",
			datatype="GPString",
			direction="Input",
			multiValue=False
		)
		drop_site = arcpy.Parameter(
			name="rm",
			displayName="Remove site from sites table?",
			datatype="GPBoolean",
			direction="Input",
			parameterType="Optional",
		)
		return [source_code, target_code, drop_site]

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Offer every distinct site code in the database as a pick-list for
		both the current-code and new-code inputs."""
		session = classes.get_new_session()
		try:
			codes = [row[0] for row in session.query(classes.Site.code).distinct().all()]
		finally:
			session.close()
		# TODO if parameter[0] has value pop from the list for parameter[1] since it would map to self.
		for picker in (parameters[0], parameters[1]):
			picker.filter.type = 'ValueList'
			picker.filter.list = codes
		return

	def updateMessages(self, parameters):
		"""Modify the messages created by internal validation for each tool
		parameter. This method is called after internal validation."""
		return

	def execute(self, parameters, messages):
		"""Hand the swap off to swap_site_recs and report the record count."""
		old_code = parameters[0].value
		fresh_code = parameters[1].value
		remove_flag = parameters[2].value
		arcpy.AddMessage("Changing records with {} -> {}".format(old_code, fresh_code))
		changed = swap_site_recs.main(old_code, fresh_code, remove_flag)
		arcpy.AddMessage("{} records updated".format(changed))
		return
class GenerateHeatPlot(object):
	"""Toolbox tool: render heatplot graphs for one transect by running the
	generate_heatplots.R script once per selected water quality variable."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Generate Heatplot"
		self.description = ""
		self.canRunInBackground = False
		self.category = "Mapping"

	def getParameterInfo(self):
		"""Define the four inputs: transect, variable(s), optional title, output folder."""
		code = arcpy.Parameter(
			displayName="Code for Transect",
			name="code",
			datatype="GPString",
			multiValue=False,
			direction="Input"
		)
		wq_var = arcpy.Parameter(
			displayName="Water Quality Variable",
			name="wq_var",
			datatype="GPString",
			multiValue=True,
			direction="Input"
		)
		# NOTE: the title parameter's internal name is "output" (not "title");
		# execute() looks it up under that name, so renaming would break it
		title = arcpy.Parameter(
			displayName="Title for graph",
			name="output",
			datatype="GPString",
			multiValue=False,
			direction="Input",
			parameterType="Optional"
		)
		output_folder = arcpy.Parameter(
			displayName="Output folder",
			name="output_folder",
			datatype="DEFolder",
			multiValue=False,
			direction="Input"
		)
		# restrict the variable picker to the columns the R script understands
		wq_var.filter.type = 'ValueList'
		wq_var.filter.list = ["temp","ph","sp_cond","salinity", "dissolved_oxygen","dissolved_oxygen_percent",
		                      "dep_25", "par", "rpar","turbidity_sc","chl", "chl_corrected", "m_value"]
		params = [code, wq_var, title, output_folder]
		return params

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Modify the values and properties of parameters before internal
		validation is performed. This method is called whenever a parameter
		has been changed."""
		# validate site name by pulling creating filter with names from table
		# get list of sites from the database profile sites table
		session = classes.get_new_session()
		try:
			sites = session.query(classes.Site).distinct().all()
			site_names = []
			# build "CODE - Name" entries so the picker is human readable
			for s in sites:
				combine = s.code + ' - ' + s.name
				site_names.append(combine)
			parameters[0].filter.type = 'ValueList'
			site_names.sort()
			parameters[0].filter.list = site_names
		finally:
			session.close()
		return

	@parameters_as_dict
	def execute(self, parameters, messages):
		"""Run the R heatplot script once per selected variable."""
		sitecodename = parameters["code"].valueAsText
		sitecode = sitecodename.split(" - ")[0]  # picker entries are "CODE - Name"
		wq_var_list = parameters["wq_var"].valueAsText.split(';')  # multiValue arrives semicolon-delimited
		title_param = parameters["output"].valueAsText
		output_folder = parameters["output_folder"].valueAsText

		### DEFINE DATA PATHS ###
		base_path = config.arcwqpro
		gen_heat = os.path.join(base_path, "arcproject", "scripts", "generate_heatplots.R")

		R = launchR.Interpreter()
		for wq_var in wq_var_list:
			# set default title ("CODE - VARIABLE") when the user left it blank
			if title_param is None:
				title = sitecode.upper() + " - " + wq_var.upper()
			else:
				title = title_param
			try:
				R.run(gen_heat, "--args", sitecode, wq_var, title, output_folder)
			except launchR.RExecutionError as e:
				# surface R's own output before re-raising so the tool fails loudly
				arcpy.AddWarning("Call to R failed - R gave the following output: {}".format(e.output))
				raise
class LinearRef(object):
	"""Toolbox tool that linear-references water quality transect points
	against the reference route, updating their stored m-values."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Locate WQT Along Reference Route"
		self.description = "Locate water quality points using linear referencing to update the m-value of selected records"
		self.canRunInBackground = False
		self.category = "Modify"

	def getParameterInfo(self):
		"""A query-type picker, an overwrite flag, and four optional bounds."""
		selector = arcpy.Parameter(
			name="query",
			displayName="Type of query to select records to modify?",
			datatype="GPString",
			multiValue=False,
			direction="Input",
			parameterType="Required"
		)
		selector.filter.type = "ValueList"
		selector.filter.list = ["ALL", "DATERANGE", "IDRANGE"]

		overwrite = arcpy.Parameter(
			name="over",
			displayName="Overwrite existing M Values?",
			datatype="GPBoolean",
			direction="Input",
			parameterType="Optional"
		)
		overwrite.value = False

		def _bound(name, label, kind):
			# the four range bounds differ only in name, label and datatype
			return arcpy.Parameter(
				name=name,
				displayName=label,
				datatype=kind,
				direction="Input",
				parameterType="Optional"
			)

		return [
			selector,
			overwrite,
			_bound("date1", "Start date", "GPDate"),
			_bound("date2", "End date", "GPDate"),
			_bound("id1", "Start ID", "GPLong"),
			_bound("id2", "End ID", "GPLong"),
		]

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Enable only the bound inputs that match the chosen query type."""
		choice = parameters[0].valueAsText
		wants_dates = choice == "DATERANGE"
		wants_ids = choice == "IDRANGE"
		parameters[2].enabled = wants_dates
		parameters[3].enabled = wants_dates
		parameters[4].enabled = wants_ids
		parameters[5].enabled = wants_ids
		return

	def updateMessages(self, parameters):
		"""Modify the messages created by internal validation for each tool
		parameter. This method is called after internal validation."""
		return

	def execute(self, parameters, messages):
		"""Run the m-value update for the selected record set."""
		kind = parameters[0].valueAsText
		overwrite = parameters[1].value
		first_date = parameters[2].value
		last_date = parameters[3].value
		first_id = parameters[4].value
		last_id = parameters[5].value

		arcpy.AddMessage("PARAMS: type = {}, overwrite = {}, start date = {}, "
		                 "end date = {}, start id = {}, end id = {}".format(kind, overwrite, first_date,
		                                                                    last_date, first_id, last_id))

		if first_date is not None and last_date is not None:
			# drop any time-of-day component so the range covers whole days
			date_range = [d.replace(hour=0, minute=0, second=0, microsecond=0)
			              for d in (first_date, last_date)]
		else:
			date_range = None

		id_range = [first_id, last_id] if first_id is not None and last_id is not None else None

		arcpy.AddMessage("Updating m values for wqt points. Be patient...")
		linear_ref.main(kind, overwrite=overwrite, dates=date_range, idrange=id_range)
		return
class GenerateSite(object):
	"""Toolbox tool: export a feature class containing every record (all days)
	for a single transect."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Map Layer - One Transect (all days)"
		self.description = ""
		self.canRunInBackground = False
		self.category = "Mapping"

	def getParameterInfo(self):
		"""Define parameter definitions"""
		siteid = arcpy.Parameter(
			displayName="Transect",
			name="siteid",
			datatype="GPString",
			multiValue=False,
			direction="Input"
		)

		# shapefile for the transects GPS breadcrumbs
		fc = arcpy.Parameter(
			displayName="Output Feature Class",
			name="output_feature_class",
			datatype="DEFeatureClass",
			direction="Output"
		)
		# attach the project's default symbology so the layer draws sensibly
		fc = mapping.set_output_symbology(fc)
		params = [siteid, fc, ]
		return params

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Modify the values and properties of parameters before internal
		validation is performed. This method is called whenever a parameter
		has been changed."""
		# validate site name by pulling creating filter with names from table
		# get list of sites from the database profile sites table
		session = classes.get_new_session()
		try:
			sites = session.query(classes.Site).distinct().all()
			site_names = []
			# build "CODE - Name" entries so the picker is human readable
			for s in sites:
				combine = s.code + ' - ' + s.name
				site_names.append(combine)
			parameters[0].filter.type = 'ValueList'
			site_names.sort()
			parameters[0].filter.list = site_names
		finally:
			session.close()
		return
		# fix: removed two unreachable lines that followed this return
		# (stray "sitecodename = parameters[...]" copied from another tool)

	def updateMessages(self, parameters):
		"""Modify the messages created by internal validation for each tool
		parameter. This method is called after internal validation."""
		return

	def execute(self, parameters, messages):
		"""The source code of the tool."""
		arcpy.env.addOutputsToMap = True
		site_codename = parameters[0].valueAsText
		site_code = site_codename.split(" - ")[0]  # picker entries are "CODE - Name"

		session = classes.get_new_session()
		try:
			siteid = swap_site_recs.lookup_siteid(session, site_code)
		finally:
			# fix: close the session even when the lookup raises
			session.close()

		output_location = parameters[1].valueAsText
		mapping.generate_layer_for_site(siteid, output_location)
class ModifySelectedSite(object):
	"""Toolbox tool: reassign the site id of the currently selected water
	quality features to a different transect."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Modify SiteID for Selected Records"
		self.description = ""
		self.canRunInBackground = False
		self.category = "Modify"

	def getParameterInfo(self):
		"""Define parameter definitions"""
		site = arcpy.Parameter(
			displayName="New Site for Selected Features",
			name="siteid",
			datatype="GPString",
			multiValue=False,
			direction="Input"
		)
		wq = arcpy.Parameter(
			displayName="Water Quality Layer with Selection",
			name="shp_file",
			datatype="GPFeatureLayer",
			direction="Input"
		)
		# NOTE: the layer is parameter 0 and the site picker is parameter 1
		params = [wq, site,]
		return params

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Modify the values and properties of parameters before internal
		validation is performed. This method is called whenever a parameter
		has been changed."""
		# validate site name by pulling creating filter with names from table
		# get list of sites from the database profile sites table
		session = classes.get_new_session()
		try:
			sites = session.query(classes.Site).distinct().all()
			site_names = []
			# build "CODE - Name" entries so the picker is human readable
			for s in sites:
				combine = s.code + ' - ' + s.name
				site_names.append(combine)
			parameters[1].filter.type = 'ValueList'
			site_names.sort()
			parameters[1].filter.list = site_names
		finally:
			session.close()

	def updateMessages(self, parameters):
		"""Modify the messages created by internal validation for each tool
		parameter. This method is called after internal validation."""
		return

	def execute(self, parameters, messages):
		"""The source code of the tool."""
		siteid_code = parameters[1].valueAsText
		site = siteid_code.split(" - ")[0]  # picker entries are "CODE - Name"

		# selected features
		feature = parameters[0].value
		desc = arcpy.Describe(feature)
		# FIDSet is a semicolon-delimited string; non-empty only when the layer has a selection
		if desc.FIDSet != '':
			num = len(desc.FIDSet.split(";"))
			arcpy.AddMessage("Updating {} records".format(num))
			ids_2_update = []
			# the search cursor honors the layer selection, so only selected rows are read
			with arcpy.da.SearchCursor(feature, ['id']) as cursor:
				for row in cursor:
					ids_2_update.append(int(row[0]))
			arcpy.AddMessage(ids_2_update)
			session = classes.get_new_session()
			siteid = swap_site_recs.lookup_siteid(session, site)
			try:
				for i in ids_2_update:
					wq = classes.WaterQuality
					q = session.query(wq).filter(wq.id == i).one()  # raises if the id is missing or duplicated
					q.site_id = int(siteid)
					session.commit()  # commits after every record
			finally:
				session.close()
		else:
			arcpy.AddMessage("No points selected. Make a selection first!")
		return
class DeleteMonth(object):
	"""Toolbox tool: permanently delete all water quality transects and
	vertical profiles (gains) for one month, with two 10-second countdowns
	that give the user a chance to cancel."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Delete Records for Month"
		self.description = "Deletes the water quality transects and gain files for a given month and year"
		self.canRunInBackground = False
		self.category = "Modify"

	def getParameterInfo(self):
		"""Define parameter definitions"""
		year = arcpy.Parameter(
			displayName="Year",
			name="year",
			datatype="GPString",
			multiValue=False,
			direction="Input"
		)
		# NOTE: internal name "monthe" is a historical typo, kept for compatibility
		month = arcpy.Parameter(
			displayName="Month",
			name="monthe",
			datatype="GPString",
			multiValue=False,
			direction="Input"
		)
		month.filter.type = 'ValueList'
		t = list(calendar.month_name)
		t.pop(0)  # calendar.month_name[0] is an empty placeholder string
		month.filter.list = t
		params = [year, month, ]
		return params

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Modify the values and properties of parameters before internal
		validation is performed. This method is called whenever a parameter
		has been changed."""
		# get years with data from the database to use as selection for tool input
		session = classes.get_new_session()
		try:
			q = session.query(extract('year', classes.WaterQuality.date_time)).distinct()
			years = []
			for year in q:
				years.append(year[0])
			parameters[0].filter.type = 'ValueList'
			parameters[0].filter.list = years
		finally:
			session.close()

		# get valid months for the selected year as the options for the tool input
		if parameters[0].value:
			Y = int(parameters[0].value)
			session = classes.get_new_session()
			try:
				q2 = session.query(extract('month', classes.WaterQuality.date_time)).filter(
					extract('year', classes.WaterQuality.date_time) == Y).distinct()
				months = []
				t = list(calendar.month_name)
				for month in q2:
					months.append(t[month[0]])  # convert month number to month name
				parameters[1].filter.type = 'ValueList'
				parameters[1].filter.list = months
			finally:
				session.close()
		return

	def updateMessages(self, parameters):
		"""Modify the messages created by internal validation for each tool
		parameter. This method is called after internal validation."""
		return

	def execute(self, parameters, messages):
		"""The source code of the tool."""
		year_to_use = int(parameters[0].value)
		month = parameters[1].value

		# look up index position in calendar.month_name to get the month number
		t = list(calendar.month_name)
		month_to_use = int(t.index(month))

		arcpy.AddMessage("YEAR: {}, MONTH: {}".format(year_to_use, month_to_use))
		arcpy.AddMessage("WARNING!: this will delete records. Quit now if you don't want to do this.")
		# ten second countdown so the user can cancel the tool
		for i in range(10, 0, -1):
			time.sleep(1)
			arcpy.AddMessage(i)

		wq = classes.WaterQuality
		gn = classes.VerticalProfile

		session = classes.get_new_session()
		try:
			days_in_month = int(calendar.monthrange(year_to_use, month_to_use)[1])
			lower_bound = datetime.date(year_to_use, month_to_use, 1)
			last_day = datetime.date(year_to_use, month_to_use, days_in_month)
			# fix: use an inclusive lower bound and an exclusive bound at the first
			# instant of the next month; the previous "> day 1 / < last-day-midnight"
			# window silently skipped every record on the final day of the month
			upper_bound = last_day + datetime.timedelta(days=1)
			arcpy.AddMessage("Deleting data for {} through {}".format(lower_bound, last_day))
			q_wq = session.query(wq).filter(wq.date_time >= lower_bound, wq.date_time < upper_bound)
			arcpy.AddMessage("Deleting transects")
			q_wq.delete()
			q_gn = session.query(gn).filter(gn.date_time >= lower_bound, gn.date_time < upper_bound)
			arcpy.AddMessage("Deleting gains")
			q_gn.delete()

			# commit changes after one final countdown
			arcpy.AddMessage("WARNING!: final chance to not commit database change. Exit now!")
			for i in range(10, 0, -1):
				time.sleep(1)
				arcpy.AddMessage(i)
			session.commit()
			# fix: report success only after the commit has actually happened
			arcpy.AddMessage("Changes committed. Records are deleted.")
		finally:
			session.close()
		return
class RenameGrabs(object):
	"""Toolbox tool: review and rename the profile-site assignment of the
	vertical profiles (WQP rows) and grab samples recorded on a given date,
	via an editable value table."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Rename grab + profiles for given date"
		self.description = ""
		self.canRunInBackground = False
		self.category = "Regression"

	def getParameterInfo(self):
		"""Define parameter definitions"""
		date_to_generate = arcpy.Parameter(
			displayName="Date",
			name="date_to_generate",
			datatype="GPDate",
			multiValue=False,
			direction="Input"
		)
		# fix: "GPValueTable" was previously passed as parameterType, leaving the
		# parameter's datatype unset; it belongs in datatype
		wqp = arcpy.Parameter(
			displayName="",
			name="wqp",
			datatype="GPValueTable",
			parameterType="Required",
			direction="Input"
		)
		wqp.columns = [['GPString', 'Type'], ['GPString', 'Current'], ['GPString', 'New'], ['GPString', 'Notes'], ['GPString','ID']]
		params = [date_to_generate, wqp]
		return params

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Modify the values and properties of parameters before internal
		validation is performed. This method is called whenever a parameter
		has been changed."""
		# only populate once: after a date is picked and before the table has values
		if parameters[0].altered and parameters[1].values is None:
			d = parameters[0].value
			t = d + datetime.timedelta(days=1)  # add one day to get upper bound
			lower = d.date()
			upper = t.date()
			session = classes.get_new_session()
			try:
				vt = []  # blank value table
				# fills out the value table with the vertical profile info
				wqp_abs = session.query(classes.ProfileSite.abbreviation, classes.VerticalProfile.source) \
					.filter(classes.VerticalProfile.date_time.between(lower, upper)) \
					.filter(classes.ProfileSite.id == classes.VerticalProfile.profile_site_id) \
					.distinct().all()
				for profile in wqp_abs:
					notes = "{}".format(profile[1])
					vt.append(['WQP', profile[0], profile[0], notes, "NA"])

				# fill out the grab sample info
				grab_abs = session.query(classes.GrabSample.profile_site_id, classes.GrabSample.lab_num,
				                         classes.GrabSample.sample_id,
				                         classes.GrabSample.site_id, classes.GrabSample.source, classes.GrabSample.id) \
					.filter(classes.GrabSample.date.between(lower, upper)) \
					.distinct().all()
				for profile in grab_abs:
					notes = "{}, {}, {}, {}".format(profile[1], profile[2], profile[3], profile[4])
					# some of the grab samples don't have profile_site and should return None
					pro_abbrev = swap_site_recs.lookup_profile_abbreviation(session, profile[0])
					vt.append(["GRAB", pro_abbrev, pro_abbrev, notes, profile[5]])

				# fix: pro_abbrev can be None; sorting mixed None/str raises TypeError
				# on Python 3, so order None rows last instead of sorting raw values
				sorted_vt = sorted(vt, key=lambda row: (row[1] is None, row[1] or ""))
				parameters[1].values = sorted_vt

				# offer every known profile abbreviation in the "New" column picker
				profiles = session.query(classes.ProfileSite.abbreviation).distinct().all()
				profile_abbreviation = []
				for profile in profiles:
					profile_abbreviation.append(profile[0])
				parameters[1].filters[2].type = 'ValueList'
				parameters[1].filters[2].list = profile_abbreviation
			finally:
				session.close()
		return

	def updateMessages(self, parameters):
		"""Modify the messages created by internal validation for each tool
		parameter. This method is called after internal validation."""
		return

	@parameters_as_dict
	def execute(self, parameters, messages):
		"""Apply every Current -> New change the user made in the value table."""
		d = parameters["date_to_generate"].value
		t = d + datetime.timedelta(days=1)  # add one day to get upper bound
		lower = d.date()
		upper = t.date()

		# get the vt parameters
		vt = parameters["wqp"].values  # values are list of lists
		for i in range(0, len(vt)):
			record_type = vt[i][0]
			current = vt[i][1]
			new = vt[i][2]
			grabid = vt[i][4]
			if current == new:
				pass  # unchanged row - nothing to do
			elif record_type == "WQP":
				arcpy.AddMessage("Changing {} to {} for {} records on {}".format(current, new, record_type, lower))
				session = classes.get_new_session()
				try:
					query = session.query(classes.VerticalProfile) \
						.filter(classes.VerticalProfile.date_time.between(lower, upper)) \
						.filter(classes.ProfileSite.id == classes.VerticalProfile.profile_site_id) \
						.filter(classes.ProfileSite.abbreviation == current).all()
					for q in query:
						q.profile_site_id = swap_site_recs.lookup_profile_site_id(session, new)
					session.commit()
				finally:
					session.close()
			elif record_type == "GRAB":
				arcpy.AddMessage("Changing {} to {} for {} records on {}".format(current, new, record_type, lower))
				session = classes.get_new_session()
				try:
					# grab rows are addressed by their unique id captured in the table
					query = session.query(classes.GrabSample) \
						.filter(classes.GrabSample.date.between(lower, upper)) \
						.filter(classes.GrabSample.id == grabid).one()
					query.profile_site_id = swap_site_recs.lookup_profile_site_id(session, new)
					session.commit()
				finally:
					session.close()
		return
class RegressionPlot(object):
	"""Toolbox tool: plot (and optionally commit) the chlorophyll regression
	for a sampling date and gain setting via the chl_regression.R script."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Regression Plot"
		self.description = ""
		self.canRunInBackground = False
		self.category = "Regression"

	def getParameterInfo(self):
		"""Define parameter definitions"""
		date_to_generate = arcpy.Parameter(
			displayName="Date",
			name="date_to_generate",
			datatype="GPDate",
			multiValue=False,
			direction="Input")

		gain_setting = arcpy.Parameter(
			displayName="Gain Setting",
			name="gain_setting",
			datatype="GPString",
			multiValue=False,
			direction="Input")

		#gain_setting.filter.type = 'ValueList'
		#gain_setting.filter.list = ['0', '1', '10', '100']

		depths = arcpy.Parameter(
			displayName="All depths?",
			name="depths",
			datatype="GPBoolean",
			parameterType="Optional")

		preview = arcpy.Parameter(
			displayName="Preview?",
			name="preview",
			datatype="GPBoolean",
			parameterType="Optional")

		output = arcpy.Parameter(
			displayName="Output Location for Graph",
			name="output",
			datatype="DEFile",
			parameterType="Optional",
			direction="Output")

		commit = arcpy.Parameter(
			displayName="Commit regression to the database?",
			name="commit",
			datatype="GPBoolean",
			parameterType="Optional")

		params = [date_to_generate, gain_setting, depths, preview, output, commit]
		return params

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Modify the values and properties of parameters before internal
		validation is performed. This method is called whenever a parameter
		has been changed."""
		if parameters[0].altered:
			# turn off params (clicking box when tool is running with crash arc)
			parameters[1].enabled = True
			d = parameters[0].value
			t = d + datetime.timedelta(days=1)  # add one day to get upper bound
			lower = d.date()
			upper = t.date()
			session = classes.get_new_session()
			try:
				# offer only the gain settings that actually have data on that date
				gains = session.query(classes.VerticalProfile.gain_setting) \
					.filter(classes.VerticalProfile.date_time.between(lower, upper)) \
					.distinct().all()
				gain_settings = []
				for g in gains:
					gain_settings.append(g[0])
				parameters[1].filter.type = 'ValueList'
				parameters[1].filter.list = gain_settings
			finally:
				session.close()
		else:
			parameters[1].enabled = False

		if parameters[0].value and parameters[1].altered:
			# turn off params (clicking box when tool is running with crash arc)
			parameters[2].enabled = True
			parameters[3].enabled = True
		else:
			parameters[2].enabled = False
			parameters[3].enabled = False

		if parameters[3].value is True:  # preview checkbox: run R immediately and show the plot
			### DEFINE DATA PATHS ###
			base_path = config.arcwqpro
			rscript_path = config.rscript  # path to R exe
			chl_reg = os.path.join(base_path, "arcproject", "scripts", "chl_regression.R")
			date_time = parameters[0].value
			date = str(date_time.date())
			gain = parameters[1].valueAsText  # fix: was "valueAstext" (AttributeError)
			output = os.path.join(base_path, "arcproject", "plots", "chl_regression_tool_preview.png")
			if parameters[2].value:
				depths = "TRUE"
			else:
				depths = "FALSE"
			try:
				CREATE_NO_WINDOW = 0x08000000  # used to hide the console window so it stays in the background
				subprocess.check_output([rscript_path, chl_reg, "--args", date, gain, output, depths, "FALSE"],
				                        creationflags=CREATE_NO_WINDOW,
				                        stderr=subprocess.STDOUT)
				webbrowser.open(output)  # open the preview image in the default viewer
			except subprocess.CalledProcessError as e:
				arcpy.AddError("Call to R returned exit code {}.\nR output the following while processing:\n{}".format(
					e.returncode, e.output))
			finally:
				parameters[3].value = False  # untick so the preview doesn't re-run on every validation
		return

	def updateMessages(self, parameters):
		"""Modify the messages created by internal validation for each tool
		parameter. This method is called after internal validation."""
		return

	def execute(self, parameters, messages):
		"""Run chl_regression.R for the chosen date/gain and optionally commit."""
		### DEFINE DATA PATHS ###
		base_path = config.arcwqpro
		rscript_path = config.rscript  # path to R exe
		chl_reg = os.path.join(base_path, "arcproject", "scripts", "chl_regression.R")
		date_time = parameters[0].value
		date = str(date_time.date())
		gain = parameters[1].valueAsText   # fix: was "valueAstext" (AttributeError)
		output = parameters[4].valueAsText  # fix: was "valueAstext" (AttributeError)
		if parameters[2].value:
			depths = "TRUE"
		else:
			depths = "FALSE"
		if parameters[5].value:
			commit = "TRUE"
		else:
			commit = "FALSE"
		if output is None:
			# no explicit path - reuse the preview location
			output = os.path.join(base_path, "arcproject", "plots", "chl_regression_tool_preview.png")
		arcpy.AddMessage("{}, {}, {}, {}, {}, {}, {},{}".format(rscript_path, chl_reg, "--args", date, gain, output, depths, commit))
		try:
			CREATE_NO_WINDOW = 0x08000000  # used to hide the console window so it stays in the background
			subprocess.check_output([rscript_path, chl_reg, "--args", date, gain, output, depths, commit],
			                        creationflags=CREATE_NO_WINDOW,
			                        stderr=subprocess.STDOUT)
		except subprocess.CalledProcessError as e:
			arcpy.AddError("Call to R returned exit code {}.\nR output the following while processing:\n{}".format(
				e.returncode, e.output))
		return
class CorrectChl(object):
	"""Toolbox tool that applies regression-table corrections to stored
	chlorophyll values for a chosen record set."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Correct Chl Values"
		self.description = "Correct Chl values from regression table."
		self.canRunInBackground = False
		self.category = "Regression"

	def getParameterInfo(self):
		"""One query-type picker plus four optional range bounds."""
		picker = arcpy.Parameter(
			displayName="Type of query to select records to modify?",
			name="query",
			datatype="GPString",
			multiValue=False,
			direction="Input",
			parameterType="Required"
		)
		picker.filter.type = "ValueList"
		picker.filter.list = ["ALL", "NEW", "DATERANGE", "IDRANGE"]

		# the four bound inputs share everything except name/label/datatype
		specs = [
			("date1", "Start date", "GPDate"),
			("date2", "End date", "GPDate"),
			("id1", "Start ID", "GPLong"),
			("id2", "End ID", "GPLong"),
		]
		bounds = [
			arcpy.Parameter(
				displayName=label,
				name=name,
				datatype=dtype,
				direction="Input",
				parameterType="Optional"
			)
			for name, label, dtype in specs
		]
		return [picker] + bounds

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Enable only the bound inputs that match the chosen query type."""
		mode = parameters[0].valueAsText
		for index in (1, 2):
			parameters[index].enabled = (mode == "DATERANGE")
		for index in (3, 4):
			parameters[index].enabled = (mode == "IDRANGE")
		return

	def updateMessages(self, parameters):
		"""Modify the messages created by internal validation for each tool
		parameter. This method is called after internal validation."""
		return

	def execute(self, parameters, messages):
		"""Kick off the chlorophyll decision-tree correction."""
		mode = parameters[0].valueAsText
		first_date = parameters[1].value
		last_date = parameters[2].value
		first_id = parameters[3].value
		last_id = parameters[4].value

		arcpy.AddMessage("PARAMS: type = {}, start date = {}, "
		                 "end date = {}, start id = {}, end id = {}".format(mode, first_date,
		                                                                   last_date, first_id, last_id))

		if first_date is not None and last_date is not None:
			# strip any time-of-day component so the range covers whole days
			date_range = [stamp.replace(hour=0, minute=0, second=0, microsecond=0)
			              for stamp in (first_date, last_date)]
		else:
			date_range = None

		id_range = [first_id, last_id] if first_id is not None and last_id is not None else None

		arcpy.AddMessage("Updating Chl values for points. Be patient...")
		chl_decision_tree.main(mode, daterange=date_range, idrange=id_range)
		return
class ExportHeatPlotData(object):
	"""Toolbox tool: dump every WaterQuality record for one transect to a CSV
	(the input data used by the heatplot R script)."""

	def __init__(self):
		"""Define the tool (tool name is the name of the class)."""
		self.label = "Export Heatplot data CSV"
		self.description = ""
		self.canRunInBackground = False
		self.category = "Mapping"

	def getParameterInfo(self):
		"""Two inputs: the transect picker and the CSV output path."""
		code = arcpy.Parameter(
			displayName="Code for Transect",
			name="code",
			datatype="GPString",
			multiValue=False,
			direction="Input"
		)
		output_csv = arcpy.Parameter(
			displayName="Output CSV",
			name="output_csv",
			datatype="DEFile",
			multiValue=False,
			direction="Output"
		)
		params = [code, output_csv]
		return params

	def isLicensed(self):
		"""Set whether tool is licensed to execute."""
		return True

	def updateParameters(self, parameters):
		"""Modify the values and properties of parameters before internal
		validation is performed. This method is called whenever a parameter
		has been changed."""
		# validate site name by pulling creating filter with names from table
		# get list of sites from the database profile sites table
		session = classes.get_new_session()
		try:
			sites = session.query(classes.Site).distinct().all()
			site_names = []
			# build "CODE - Name" entries so the picker is human readable
			for s in sites:
				combine = s.code + ' - ' + s.name
				site_names.append(combine)
			parameters[0].filter.type = 'ValueList'
			site_names.sort()
			parameters[0].filter.list = site_names
		finally:
			session.close()
		return

	@parameters_as_dict
	def execute(self, parameters, messages):
		"""Write header + one row per WaterQuality record for the chosen site."""
		import sys  # local import: only used to pick the csv-safe open mode

		sitecodename = parameters["code"].valueAsText
		sitecode = sitecodename.split(" - ")[0]  # picker entries are "CODE - Name"
		output_file = parameters["output_csv"].valueAsText
		arcpy.AddMessage("Saving WaterQuality for site {} as csv.\n{}".format(sitecodename, output_file))

		# fix: csv.writer needs binary mode on Python 2 but text mode with
		# newline='' on Python 3 (writing csv rows to a 'wb' handle raises
		# TypeError there) - this toolbox runs under both interpreters
		if sys.version_info[0] >= 3:
			outfile = open(output_file, 'w', newline='')
		else:
			outfile = open(output_file, 'wb')
		session = classes.get_new_session()
		try:
			outcsv = csv.writer(outfile)
			records = session.query(classes.WaterQuality).filter(classes.Site.code == sitecode).\
				filter(classes.Site.id == classes.WaterQuality.site_id)
			# header row from the mapped column names, then one row per record
			outcsv.writerow([column.name for column in classes.WaterQuality.__mapper__.columns])
			for curr in records:
				outcsv.writerow([getattr(curr, column.name) for column in classes.WaterQuality.__mapper__.columns])
		finally:
			# fix: close the file as well as the session, even when an error occurs
			outfile.close()
			session.close()
		return
29716 | <reponame>ruslanlvivsky/python-algorithm<filename>swexpert/d3/sw_2817_1.py<gh_stars>1-10
# Number of test cases to process, read from stdin.
test_cases = int(input().strip())
def sum_sub_nums(idx, value):
    """Count subsets of nums[idx:] that, added to `value`, sum to exactly K.

    Depth-first over include/exclude choices; increments the module-global
    `result` on every exact hit. Relies on the globals N, K and nums being
    set by the test-case loop below.
    """
    global result
    # Exact match is tested first so that a sum reaching K at idx == N still counts.
    if value == K:
        result += 1
        return
    # Prune: the running sum overshot K (assumes non-negative inputs -- TODO
    # confirm) or there are no elements left to choose from.
    if value > K or idx >= N:
        return
    sum_sub_nums(idx + 1, value)  # branch 1: skip nums[idx]
    sum_sub_nums(idx + 1, value + nums[idx])  # branch 2: take nums[idx]
# For each test case: read N (element count) and K (target sum), then the N
# values; count the subsets summing to K and print '#<case> <count>'.
for t in range(1, test_cases + 1):
    N, K = map(int, input().strip().split())
    nums = list(map(int, input().strip().split()))
    result = 0  # reset the global counter mutated by sum_sub_nums
    sum_sub_nums(0, 0)
    print('#{} {}'.format(t, result))
| StarcoderdataPython |
6410749 | <reponame>mozjay0619/scikit-optimize-adapter<filename>setup.py
from setuptools import setup, find_packages
import adapter
# Version string is sourced from the package itself so it lives in one place.
VERSION = adapter.__version__
# The long description shown on PyPI comes straight from the README.
with open("README.rst", "r") as fh:
    long_description = fh.read()
setup(
    name="scikit-optimize-adapter",
    version=VERSION,
    author="<NAME>",
    description="Dask parallelized bayesian optimization toolbox",
    long_description=long_description,
    long_description_content_type="text/x-rst",
    url=None,
    license="DSB 3-clause",
    packages=find_packages(),
    # Runtime dependencies: bayesian optimization plus the dask scheduler stack.
    install_requires=["scikit-optimize>=0.7.4", "dask", "distributed"]
)
| StarcoderdataPython |
1727000 | <reponame>hologerry/mmflow
# optimizer: Adam with a small base learning rate and no gradient clipping.
optimizer = dict(type='Adam', lr=5e-5, weight_decay=0.0004, betas=(0.9, 0.999))
optimizer_config = dict(grad_clip=None)
# learning policy: 'MultiStage' with five milestone LRs at 200k-iteration
# boundaries; the 0.5 gammas presumably halve the LR at each listed step
# inside a stage -- confirm against the MultiStage LR hook implementation.
lr_config = dict(
    policy='MultiStage',
    by_epoch=False,
    gammas=[0.5, 0.5, 0.5, 0.5, 0.5],
    milestone_lrs=[5e-5, 3e-5, 2e-5, 1e-5, 5e-6],
    milestone_iters=[0, 200000, 400000, 600000, 800000],
    steps=[[100000, 150000], [300000, 350000], [500000, 550000],
           [700000, 750000], [850000, 875000, 900000, 950000, 975000]])
# 1M-iteration run; checkpoint and EPE evaluation every 100k iterations.
runner = dict(type='IterBasedRunner', max_iters=1000000)
checkpoint_config = dict(by_epoch=False, interval=100000)
evaluation = dict(interval=100000, metric='EPE')
| StarcoderdataPython |
"""
TLE
dp[i][k] := min of the largest sum among k subarrays from nums[:i]
"""
class Solution(object):
    def splitArray(self, nums, K):
        """Split nums into K contiguous subarrays minimizing the largest
        subarray sum (LeetCode 410). Kept as an intentionally slow (TLE)
        reference: O(N^2 * K) states with an O(N) sum per transition.
        """
        N = len(nums)
        # dp[i][k] = minimal achievable "largest subarray sum" when the
        # first i elements are split into exactly k subarrays.
        dp = [[float('inf') for _ in xrange(K+1)] for _ in xrange(N+1)]
        dp[0][0] = 0
        for i in xrange(1, N+1):
            for k in xrange(1, min(i, K)+1):
                for j in xrange(k, i+1):
                    # Last part is nums[j-1:i]; re-summing it each time is
                    # what makes this version time out.
                    dp[i][k] = min(dp[i][k], max(dp[j-1][k-1], sum(nums[j-1:i])))
        return dp[N][K]
"""
TLE
dp[i][k] := min of the largest sum among k subarrays from nums[:i]
s[i][j] := sum of nums[i:j+1]
"""
class Solution(object):
    def splitArray(self, nums, K):
        """Split nums into K contiguous parts minimizing the largest part sum.

        Precomputes interval sums s[i][j] = sum(nums[i:j+1]) so each DP
        transition is O(1). Fixes two defects in the previous version: the
        interval-length loop stopped at N-1, leaving the full-array sum
        s[0][N-1] uncomputed (wrong answer whenever K == 1), and the final
        line carried corrupted trailing text.
        """
        N = len(nums)
        # s[i][j] holds sum(nums[i:j+1]); built outward from the diagonal.
        s = [[0 for _ in xrange(N)] for _ in xrange(N)]
        for i in xrange(N): s[i][i] = nums[i]
        # Interval lengths 2..N (was 2..N-1, which skipped the whole array).
        for l in xrange(2, N+1):
            for i in xrange(N):
                j = i+l-1
                if j>=N: continue
                s[i][j] = s[i+1][j-1]+nums[i]+nums[j]
        # dp[i][k] = min largest-part sum when nums[:i] is split into k parts.
        dp = [[float('inf') for _ in xrange(K+1)] for _ in xrange(N+1)]
        dp[0][0] = 0
        for i in xrange(1, N+1):
            for k in xrange(1, min(i, K)+1):
                for j in xrange(k, i+1):
                    dp[i][k] = min(dp[i][k], max(dp[j-1][k-1], s[j-1][i-1]))
        return dp[N][K]
3508969 | import unittest
from bandwidth.voice.bxml import Response
from lxml.builder import E
class ResponseTests(unittest.TestCase):
    """Unit tests for the BXML Response serialisation."""

    def test_to_xml(self):
        """to_xml() should serialise a single-verb response."""
        response = Response(E.Hangup())
        expected = b'<xml><Response><Hangup/></Response></xml>'
        self.assertEqual(expected, response.to_xml())

    def test_to_xml_with_several_verbs(self):
        """to_xml() should serialise multiple verbs in order."""
        response = Response(E.Pause({'duration': '10'}), E.Hangup())
        expected = b'<xml><Response><Pause duration="10"/><Hangup/></Response></xml>'
        self.assertEqual(expected, response.to_xml())

    def test__str__(self):
        """__str__() should return the XML as a string."""
        response = Response(E.Hangup())
        self.assertEqual('<xml><Response><Hangup/></Response></xml>', str(response))
| StarcoderdataPython |
"""Techniques for inference.
Description
-----------
Inference is same as asking conditional probability questions to the models.
"""
# ------------------------------------
# Name : inference.py
# Author : E.Taskesen
# Contact : <EMAIL>
# Licence : See licences
# ------------------------------------
# %% Libraries
from pgmpy.inference import VariableElimination
import bnlearn
import numpy as np
# %% Exact inference using Variable Elimination
def fit(model, variables=None, evidence=None, to_df=True, verbose=3):
    """Inference using Variable Elimination.

    Parameters
    ----------
    model : dict
        Contains model (keys 'model' and 'adjmat').
    variables : List, optional
        For exact inference, P(variables | evidence). The default is None.
            * ['Name_of_node_1']
            * ['Name_of_node_1', 'Name_of_node_2']
    evidence : dict, optional
        For exact inference, P(variables | evidence). The default is None.
            * {'Rain':1}
            * {'Rain':1, 'Sprinkler':0, 'Cloudy':1}
    to_df : Bool, (default is True)
        The output is converted to dataframe output. Note that this heavily impacts the speed.
    verbose : int, optional
        Print progress to screen. The default is 3.
        0: None, 1: ERROR, 2: WARN, 3: INFO (default), 4: DEBUG, 5: TRACE

    Returns
    -------
    query inference object (or None when the model holds no BayesianModel).

    Examples
    --------
    >>> import bnlearn as bn
    >>> model = bn.import_DAG('sprinkler')
    >>> query = bn.inference.fit(model, variables=['Wet_Grass'], evidence={'Rain':1, 'Sprinkler':0, 'Cloudy':1})
    >>> query.df
    """
    if not isinstance(model, dict): raise Exception('[bnlearn] >Error: Input requires a object that contains the key: model.')
    adjmat = model['adjmat']
    if not np.all(np.isin(variables, adjmat.columns)):
        raise Exception('[bnlearn] >Error: [variables] should match names in the model (Case sensitive!)')
    # NOTE(review): evidence defaults to None, in which case .keys() raises here.
    if not np.all(np.isin([*evidence.keys()], adjmat.columns)):
        raise Exception('[bnlearn] >Error: [evidence] should match names in the model (Case sensitive!)')
    if verbose>=3: print('[bnlearn] >Variable Elimination..')
    # Extract the learned model (input was asserted to be a dict above).
    model = model['model']
    # Variable elimination needs a BayesianModel with learned CPDs.
    if 'BayesianModel' not in str(type(model)):
        if verbose>=1: print('[bnlearn] >Warning: Inference requires BayesianModel. hint: try: parameter_learning.fit(DAG, df, methodtype="bayes") <return>')
        return None
    # (The previous "convert to BayesianModel" branch was unreachable: the
    # identical check above already returned None, so it was removed.)
    try:
        model_infer = VariableElimination(model)
    except Exception as err:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit propagate,
        # and chained so the original cause is preserved.
        raise Exception('[bnlearn] >Error: Input model does not contain learned CPDs. hint: did you run parameter_learning.fit?') from err
    # Computing the probability P(class | evidence)
    query = model_infer.query(variables=variables, evidence=evidence, show_progress=(verbose>0))
    # Store also in dataframe
    query.df = bnlearn.query2df(query) if to_df else None
    if verbose>=3: print(query)
    return query
| StarcoderdataPython |
386213 | <filename>saltlint/rules/SLSFileNameRule.py
# -*- coding: utf-8 -*-
# opyright (c) 2020 Warpnet B.V.
import os
from saltlint.linter.rule import Rule
from saltlint.utils import LANGUAGE_SLS
class SLSFileNameRule(Rule):
    """Flag .sls files whose basename contains an extra period.

    Salt cannot reference an SLS file whose name has a period other than the
    suffix separator, so such files are reported on their first line.
    """
    id = '214'
    shortdesc = ('SLS file with a period in the name (besides the suffix period) can not be '
                 'referenced')
    description = ('SLS file with a period in the name (besides the suffix period) can not be '
                   'referenced')
    severity = 'HIGH'
    languages = [LANGUAGE_SLS]
    tags = ['formatting']
    version_added = 'v0.5.0'

    def matchtext(self, file, text):
        matches = []
        basename = os.path.basename(file['path'])
        # A valid name contains at most one period (the suffix separator).
        if basename.count('.') > 1:
            lines = text.splitlines()
            first_line = lines[0] if lines else ''
            matches.append((1, first_line, self.shortdesc))
        return matches
| StarcoderdataPython |
152044 | <reponame>SebaDro/st-deep-hydro
import logging
import os
import xarray as xr
from typing import Union
from libs import processing
logger = logging.getLogger(__name__)
def nse_metric(observations: xr.DataArray, predictions: xr.DataArray, as_dataset: bool = False,
               basin: str = None) -> Union[float, xr.Dataset]:
    """Compute the Nash-Sutcliffe Efficiency (NSE) of streamflow predictions.

    Parameters
    ----------
    observations: xarray.DataArray
        Observed streamflow values (true values).
    predictions: xarray.DataArray
        Predicted streamflow values.
    as_dataset: bool
        If True (and `basin` is given), wrap the metric in an xarray.Dataset
        indexed by the basin ID; otherwise return the raw value.
    basin: str
        ID of the basin the metric belongs to.

    Returns
    -------
    Union[float, xr.Dataset]
        The NSE, either raw or as a basin-indexed dataset.
    """
    squared_errors = xr.DataArray.sum((predictions - observations) ** 2)
    squared_anomalies = xr.DataArray.sum((observations - observations.mean()) ** 2)
    nse = 1 - squared_errors / squared_anomalies
    if as_dataset and basin is not None:
        return xr.Dataset(dict(nse=(["basin"], [nse])), coords=dict(basin=[basin]))
    return nse
class Evaluation:
    """Accumulates basin-wise evaluation results (aligned timeseries + NSE)."""

    def __init__(self):
        # Empty dataset; results are merged in incrementally.
        self.__ds_results = xr.Dataset()

    @property
    def ds_results(self):
        """xarray.Dataset holding all evaluation results collected so far."""
        return self.__ds_results

    def append_evaluation_results(self, ds: xr.Dataset):
        """Merge `ds` (aligned obs/pred timeseries plus a basin-indexed NSE)
        into the stored results."""
        self.__ds_results = xr.merge([self.__ds_results, ds])

    def calc_evaluation(self, ds_obs: xr.Dataset, ds_pred: xr.Dataset, target_var: str, basin: str,
                        append: bool = True) -> Union[None, xr.Dataset]:
        """Align observations with predictions over a common timespan, compute
        the NSE for `basin` and either store the result (append=True) or
        return it.

        The aligned dataset exposes `<target_var>_obs` and `<target_var>_pred`
        variables produced by processing.merge_observations_and_predictions.
        """
        merged = processing.merge_observations_and_predictions(ds_obs, ds_pred, True)
        ds_nse = nse_metric(merged[target_var + "_obs"], merged[target_var + "_pred"],
                            as_dataset=True, basin=basin)
        merged = xr.merge([merged, ds_nse])
        if not append:
            return merged
        self.append_evaluation_results(merged)

    def save(self, out_dir: str, pref: str = None):
        """Write the collected results to `<out_dir>/[<pref>_]prediction.nc`."""
        filename = "prediction.nc" if pref is None else f"{pref}_prediction.nc"
        self.ds_results.to_netcdf(os.path.join(out_dir, filename))
| StarcoderdataPython |
4916044 | from typing import List, Optional, Tuple
import pandas as pd # type: ignore
from pydantic import BaseModel
def drop_na_inputs(*, input_data: pd.DataFrame) -> pd.DataFrame:
    """Return a new DataFrame with all rows containing NA values removed.

    The caller's frame is never mutated: dropna() without inplace already
    produces a fresh copy, matching the copy-then-inplace original.
    """
    return input_data.dropna()
def validate_inputs(*, input_data: pd.DataFrame) -> Tuple[pd.DataFrame, Optional[dict]]:
    """Check model inputs for unprocessable values.

    Returns the NA-filtered frame and an error payload. The redundant
    ``input_data.copy()`` was removed: drop_na_inputs already operates on a
    copy, so the caller's frame is never mutated. ``errors`` is currently
    always None because the pydantic validation below is disabled.
    """
    validated_data = drop_na_inputs(input_data=input_data)
    errors = None
    # Pydantic schema validation is intentionally disabled. To re-enable,
    # validate the NaN-replaced records and capture ValidationError.json():
    # try:
    #     MultipleSmilesDataInputs(
    #         yolo=validated_data.replace({np.nan: None}).to_dict(orient="records")
    #     )
    # except ValidationError as error:
    #     errors = error.json()
    return validated_data, errors
class SmilesDataInputSchema(BaseModel):
    """Pydantic schema for a single SMILES record.

    Only the SMILES string is currently declared; the other fields are
    disabled below pending a decision on their validation.
    """
    # SMILES: List[str]
    SMILES: Optional[str]
    # embeddings: Optional[list]
    # predictions: Optional[float]
class MultipleSmilesDataInputs(BaseModel):
    """Wrapper schema validating a batch of SMILES records at once."""
    yolo: List[SmilesDataInputSchema]
| StarcoderdataPython |
6606153 | import os
from collections import OrderedDict
import igibson
from igibson.utils.constants import MAX_CLASS_COUNT, SemanticClass
# To maintain backward compatibility, the starting class id should be SemanticClass.SCENE_OBJS + 1
def get_class_name_to_class_id(starting_class_id=SemanticClass.SCENE_OBJS + 1):
    """Build the ordered mapping from semantic class name to class id.

    :param starting_class_id: first id assigned to scene-object categories
        (defaults to SemanticClass.SCENE_OBJS + 1 for backward compatibility)
    """
    mapping = OrderedDict()
    mapping["agent"] = SemanticClass.ROBOTS  # Agents use the robot semantic class.
    categories_path = os.path.join(igibson.ig_dataset_path, "metadata", "categories.txt")
    next_id = starting_class_id
    if os.path.isfile(categories_path):
        with open(categories_path) as fh:
            for raw_line in fh:
                # The last few IDs are reserved for DIRT, STAIN, WATER, etc.
                assert next_id < SemanticClass.DIRT, "Class ID overflow"
                mapping[raw_line.strip()] = next_id
                next_id += 1
    return mapping
# valid room class starts from 1
def get_room_name_to_room_id(starting_room_id=1):
    """Build the ordered mapping from room name to room id.

    :param starting_room_id: id assigned to the first room category
        (valid room ids start from 1)
    """
    rooms_path = os.path.join(igibson.ig_dataset_path, "metadata", "room_categories.txt")
    mapping = OrderedDict()
    if os.path.isfile(rooms_path):
        with open(rooms_path) as fh:
            for offset, raw_line in enumerate(fh):
                mapping[raw_line.strip()] = starting_room_id + offset
    return mapping
# Module-level lookup tables, built once at import time from the dataset metadata.
CLASS_NAME_TO_CLASS_ID = get_class_name_to_class_id()
ROOM_NAME_TO_ROOM_ID = get_room_name_to_room_id()
| StarcoderdataPython |
class Token:
    """Value object describing a matched span.

    Fixes the corrupted class-header line ("9619308 | class Token:"), which
    broke the file's syntax.

    Attributes:
        question: payload associated with the token.
        multiline: whether the token spans multiple lines.
        start: start position of the span.
        end: end position of the span.
    """

    def __init__(self, question, multiline, start, end):
        self.multiline = multiline
        self.question = question
        self.start = start
        self.end = end
        super().__init__()

    def __str__(self):
        return '(Q:{0:} ,M:{1:} ,Start: {2:} ,End: {3:} )'.format(self.question, self.multiline, self.start, self.end)
| StarcoderdataPython |
232246 | <gh_stars>1-10
"""
Convert an OpenAI gym environment into a Spikey game.
Examples
--------
.. code-block:: python
from gym.envs.classic_control import cartpole
cartpole_env = gym_wrapper(cartpole.CartPoleEnv)
kwargs = {
"param1": 0,
}
game = cartpole_env(**kwargs)
game.seed(0)
state = game.reset()
for _ in range(100):
action = model.get_action(state)
state, reward, done, info = game.step(action)
if done:
break
game.close()
"""
from copy import deepcopy
from spikey.games.template import RL
class GymWrap(RL):
    """Glue class used by gym_wrapper.

    gym_wrapper rebinds __bases__ to (gym_env, RL), so __init__ initialises
    the gym env first (via super()) and then the RL/Game base explicitly.
    """

    def __init__(self, env_kwargs=None, *args, **kwargs):
        # `env_kwargs=None` replaces the mutable default `{}` (shared-default
        # pitfall); behavior for all existing callers is unchanged.
        env_kwargs = {} if env_kwargs is None else env_kwargs
        mro = type(self).__mro__
        game_idx = mro.index(RL)
        super().__init__(**env_kwargs)  # Always env regardless of its MRO
        mro[game_idx].__init__(
            self, *args, **kwargs
        )  # base, asserting base.__base__ == RL
def gym_wrapper(env: type) -> type:
    """
    Wrap openai gym environment for compatability within Spikey.
    Restructures environment into RL game.

    WARNING: May break inheretance when wrapping multiple different
    gym envs in the same file, check the wrapped_env.__mro__ of each
    to ensure has only desired values.

    Parameters
    ----------
    env: gym.Env
        Uninitialized gym environment.

    Return
    ------
    GymWrap Restructured version of Env. Notably, if need to pass
    any parameters to the gym env, do GymWrap(env_kwargs={...}, **RL_kwargs)

    Examples
    --------
    .. code-block:: python

        from gym.envs.classic_control import cartpole
        cartpole_env = gym_wrapper(cartpole.CartPoleEnv)

        game = cartpole_env(env_kwargs={}, **kwargs)
        game.seed(0)

        state = game.reset()
        for _ in range(100):
            action = model.get_action(state)
            state, reward, done, info = game.step(action)
            if done:
                break

        game.close()
    """
    # NOTE: copy.deepcopy returns class objects unchanged (documented in the
    # `copy` module), so `type_new` IS GymWrap and the assignments below
    # mutate GymWrap globally -- that is the reason for the WARNING above.
    # The mutation is kept because GymWrap.__init__'s zero-argument super()
    # is cell-bound to GymWrap and only works when GymWrap is in the MRO.
    type_new = deepcopy(GymWrap)
    type_new.__bases__ = (env, RL)

    try:
        name_new = f"RL_{env.__name__}"
    except Exception:
        name_new = "RL_ENV"  # fixed: was f"RL_ENV", an f-string without placeholders

    type_new.__name__ = name_new
    return type_new
| StarcoderdataPython |
3403383 | import click
from FC2 import utils
from FC2 import aircraft
from FC2 import airport
from FC2 import country
from FC2 import currency
from FC2 import itenerary
from FC2 import price
from FC2 import routing
import time
import networkx as nx
import pandas as pd
import os,FC2
@click.command()
@click.argument("file", type=click.Path(exists=True), default=os.path.join(FC2.__path__[0], 'data', 'testroutes.csv'))
def main(file):
    """Compute and export the cheapest/shortest flight route for each test
    route listed in FILE (CSV), using the bundled airport/aircraft/currency
    reference data. Results are appended to a CSV via Utility.to_csv.
    """
    # Creating the required Objects
    airport_obj = airport.Airport()
    aircraft_obj = aircraft.Aircraft()
    currency_obj = currency.Currency()
    country_obj = country.Country()
    utils_obj = utils.Utility()
    it_obj = itenerary.Itenerary()
    # Getting the data
    airports = airport_obj.get_AirportData(os.path.join(FC2.__path__[0], 'data', 'airports_new.csv'))
    curr = currency_obj.get_CurrencyData(os.path.join(
        FC2.__path__[0], 'data', 'currencyrates.csv'))
    countries = country_obj.get_CountryData(
        os.path.join(FC2.__path__[0], 'data', 'countrycurrency.csv'))
    aircrafts = aircraft_obj.get_AircraftData(
        os.path.join(FC2.__path__[0], 'data', 'aircraft.csv'))
    # Aircraft table keyed by aircraft code for O(1) lookups.
    _aircraftsDict = aircrafts.set_index('code').to_dict(orient='index')
    # Pass this file to Utility to check for errors
    utils_obj.displayStatusFormatMessage("\tParsing Inputs for Errors")
    testRoutes = utils_obj.handleTestInput(file)
    mergedData = utils_obj.getDict(airports, curr, countries[[
        'name', 'currency_alphabetic_code']])
    utils_obj.displaySuccessFormatMessage("\tParsing Inputs for Errors -- COMPLETE")
    if len(testRoutes) == 0:
        print("None of the Test Lists had unique airports. Please try again")
    else:
        finalCSV = []  # rows accumulated for the final CSV export
        for routes in testRoutes:
            _route = routing.Routes()
            locations = []
            utils_obj.displayStatusFormatMessage("\tCleaning Individual Inputs")
            cleanedInput = utils_obj.checkInputErrors(routes, airports, aircrafts)
            utils_obj.displaySuccessFormatMessage(
                "\tCleaning Individual Inputs -- COMPLETE")
            hashes = '#'*150
            utils_obj.displayManFormatMessage("{} \n\t\t\tCalculating Shortest Route for {}\n{}".format(hashes,cleanedInput,hashes))
            utils_obj.displayManFormatMessage("\t\t[Generating Intinerary]",color="cyan")
            filteredData = it_obj.getIteneraryData(cleanedInput,mergedData)
            # Collect (lat, lon) pairs for every airport on the cleaned route.
            for locs in cleanedInput[0]:
                locations.append((filteredData.get(locs).get('Latitude'),filteredData.get(locs).get('Longitude')))
            airportAdjGraph = it_obj.getAdjacencyGraph(locations)
            utils_obj.displayManFormatMessage(
                "\t\t[Intinerary Generated]", color="cyan")
            utils_obj.displayManFormatMessage(
                "\t\t[Finding Route -----------------------]", color="cyan")
            # airCRange is None when no aircraft was specified for the route.
            airCRange,aircraft_type = utils_obj.isAircraft(cleanedInput,_aircraftsDict)
            routeList,routeDistances,Ngraph =_route.getRoute(airportAdjGraph,cleanedInput,filteredData)
            utils_obj.drawGraph(cleanedInput,Ngraph)
            if not airCRange == None:
                isRoutePossible = _route.isPossible(airCRange, routeDistances)
                if isRoutePossible:
                    finalRoute, finalDistances = _route.getFinalAcRoute(
                        airCRange, routeList, routeDistances)
                    # NOTE(review): totalDistance is computed but never used below.
                    totalDistance = sum(finalDistances)
                    _price = price.Price()
                    finalRoute, finalPrice = _price.getPrice(
                        finalRoute, finalDistances, filteredData, airCRange)
                    finalRoute.append(aircraft_type)
                    finalRoute.append(finalPrice)
                    finalCSV.append(finalRoute)
                    utils_obj.displayManFormatMessage("{}".format(hashes))
                    utils_obj.displayManFormatMessage(
                        "\tROUTE GENERATED: {} CALCULATED PRICE: \u20ac{:.2f}".format(finalRoute[:-1], finalRoute[-1]), color="green")
                    utils_obj.displayManFormatMessage("{}".format(hashes))
                else:
                    # NOTE(review): total_distance is assigned but never used in this branch.
                    total_distance="Route Not Possible"
                    ogRoute = list(cleanedInput[0])
                    ogRoute.append(aircraft_type)
                    ogRoute.append("No Route")
                    finalCSV.append(ogRoute)
                    utils_obj.displayManFormatMessage("{}".format(hashes))
                    utils_obj.displayManFormatMessage(
                        "\t ROUTE: {} AIRCRAFT CANNOT MAKE THE JOURNEY TRY AGAIN".format(ogRoute[:-1]), color="red")
                    utils_obj.displayManFormatMessage("{}".format(hashes))
            else:
                # No aircraft given: report distance only.
                total_distance = sum(routeDistances)
                routeList.append(total_distance)
                finalCSV.append(routeList)
                utils_obj.displayManFormatMessage("{}".format(hashes))
                utils_obj.displayManFormatMessage(
                    "\tROUTE GENERATED: {} No Aircraft. CALCULATED DISTANCE: {:.2f}km".format(routeList[:-1], routeList[-1]), color="yellow")
                utils_obj.displayManFormatMessage("{}".format(hashes))
        if not finalCSV == []:
            utils_obj.to_csv(finalCSV)
# Run the CLI when invoked directly.
if __name__=="__main__":
    main()
| StarcoderdataPython |
9720851 | import torch
import numpy as np
import argparse
import pandas as pd
import sys
import os
from torch import nn
from torch.nn import functional as F
import tqdm
import pprint
from src import utils as ut
import torchvision
from haven import haven_utils as hu
from haven import haven_chk as hc
from src import datasets, models
from torch.utils.data import DataLoader
import exp_configs
from torch.utils.data.sampler import RandomSampler
from src import wrappers
from haven import haven_wizard as hw
import warnings
warnings.filterwarnings("ignore")
def trainval(exp_dict, savedir, args):
"""
exp_dict: dictionary defining the hyperparameters of the experiment
savedir: the directory where the experiment will be saved
args: arguments passed through the command line
"""
# set seed
# ==================
seed = 42
np.random.seed(seed)
torch.manual_seed(seed)
if args.use_cuda:
device = 'cuda'
torch.cuda.manual_seed_all(seed)
assert torch.cuda.is_available(), 'cuda is not, available please run with "-c 0"'
else:
device = 'cpu'
datadir = args.datadir
if args.domain_shift:
src_train_datadir = datadir + '/domain_adaptation/source/'
val_datadir = datadir + '/domain_adaptation/target/'
test_datadir = datadir + '/domain_adaptation/target/test/'
print('Running on device: %s' % device)
# Create model, opt, wrapper
model_original = models.get_model(exp_dict["model"], exp_dict=exp_dict).cuda()
opt = torch.optim.Adam(model_original.parameters(),
lr=1e-5, weight_decay=0.0005)
model = wrappers.get_wrapper(exp_dict["wrapper"], model=model_original, opt=opt).cuda()
score_list = []
# Checkpointing
# =============
score_list_path = os.path.join(savedir, "score_list.pkl")
model_path = os.path.join(savedir, "model_state_dict.pth")
opt_path = os.path.join(savedir, "opt_state_dict.pth")
if os.path.exists(score_list_path):
# resume experiment
score_list = hu.load_pkl(score_list_path)
model.load_state_dict(torch.load(model_path))
opt.load_state_dict(torch.load(opt_path))
s_epoch = score_list[-1]["epoch"] + 1
else:
# restart experiment
score_list = []
s_epoch = 0
# Load datasets
if args.domain_shift:
src_train_set = datasets.get_dataset(dataset_name=exp_dict["dataset"],
split="train",
transform=exp_dict.get("transform"),
datadir=src_train_datadir)
target_train_set = datasets.get_dataset(dataset_name=exp_dict["dataset"],
split="target_train",
transform=exp_dict.get("transform"),
datadir=val_datadir)
target_val_set = datasets.get_dataset(dataset_name=exp_dict["dataset"], split="target_val",
transform=exp_dict.get("transform"),
datadir=val_datadir)
unlabeled_idx = list(range(len(target_train_set)))
else:
train_set = datasets.get_dataset(dataset_name=exp_dict["dataset"],
split="train",
transform=exp_dict.get("transform"),
datadir=datadir)
val_set = datasets.get_dataset(dataset_name=exp_dict["dataset"], split="val",
transform=exp_dict.get("transform"),
datadir=datadir)
unlabeled_idx = list(range(len(train_set)))
test_set = datasets.get_dataset(dataset_name=exp_dict["dataset"], split="test",
transform="none",
datadir=test_datadir)
sampling_strategy = args.sampling_strategy
rand_state = np.random.RandomState(1311)
if sampling_strategy != 'None':
print("The sampling strategy is: ", sampling_strategy)
n_samples = args.n_samples
print("# samples: ", n_samples)
if sampling_strategy == "learned_loss":
n_random = min(n_samples, 40)
rand_idx = rand_state.choice(unlabeled_idx, n_random, replace=False)
else:
rand_idx = rand_state.choice(unlabeled_idx, n_samples, replace=False)
for id in rand_idx:
unlabeled_idx.remove(id)
# Run training and validation
for epoch in range(s_epoch, args.n_epochs):
score_dict = {"epoch": epoch}
# Active learning on entire source domain
if sampling_strategy != "None" and args.domain_adaptation == 0 and args.domain_shift == 0:
if sampling_strategy == "random" or (epoch < 4 and sampling_strategy == "learned_loss"):
print("random!")
print(rand_idx)
train_loader = DataLoader(train_set,
sampler=ut.SubsetSampler(train_set, indices=rand_idx),
batch_size=exp_dict["batch_size"])
elif sampling_strategy == "learned_loss" and epoch == 4:
print("active learning epoch #4: choosing labels for learning!")
# Set labels for active learning once
train_loader = DataLoader(train_set,
sampler=ut.SubsetSampler(train_set, indices=unlabeled_idx),
batch_size=exp_dict["batch_size"])
with torch.no_grad():
score, losses = model.val_on_loader(train_loader)
losses = np.array(losses)
idx = losses.argsort()[-n_samples:][::-1]
new_labeled_idx = []
for id in idx:
new_labeled_idx.append(unlabeled_idx[id])
new_labeled_idx.extend(rand_idx)
print(new_labeled_idx)
train_loader = DataLoader(train_set,
sampler=ut.SubsetSampler(train_set, indices=new_labeled_idx),
batch_size=exp_dict["batch_size"])
elif sampling_strategy == "learned_loss" and epoch > 4:
print("active learning after epoch #4!")
print(new_labeled_idx)
train_loader = DataLoader(train_set,
sampler=ut.SubsetSampler(train_set, indices=new_labeled_idx),
batch_size=exp_dict["batch_size"])
# TODO: not completely implemented.
elif sampling_strategy == "ada_clue":
train_loader = DataLoader(train_set,
sampler=ut.SubsetSampler(train_set, indices=rand_idx),
batch_size=exp_dict["batch_size"])
val_loader = DataLoader(val_set, shuffle=False, batch_size=exp_dict["batch_size"])
model.train_on_loader(model, train_loader, val_loader, args.domain_adaptation, args.sampling_strategy, n_samples)
val_loader = DataLoader(val_set, shuffle=False, batch_size=exp_dict["batch_size"])
# train
score, _ = model.train_on_loader(model, train_loader, val_loader, args.domain_adaptation, args.sampling_strategy)
score_dict.update(score)
# Add score_dict to score_list
score_list += [score_dict]
# validate
score, losses = model.val_on_loader(val_loader)
score_dict.update(score)
# # visualize
vis_loader_val = DataLoader(val_set, sampler=ut.SubsetSampler(val_set, indices=[0, 2, 4, 10, 12, 25]),
batch_size=1)
model.vis_on_loader(vis_loader_val, savedir=os.path.join(savedir, "val_images"))
# Train on source dataset with a domain shift, with optional domain adaptation (no active learning)
elif args.domain_shift:
src_train_loader = DataLoader(src_train_set, shuffle=False, batch_size=exp_dict["batch_size"])
target_train_loader = DataLoader(target_train_set, shuffle=False, batch_size=exp_dict["batch_size"])
target_val_loader = DataLoader(target_val_set, shuffle=False, batch_size=exp_dict["batch_size"])
# Collect sample weights on last DA epoch for AADA
if sampling_strategy == "aada" and args.domain_adaptation and epoch == (args.n_epochs - 1):
# train
score, sample_weights = model.train_on_loader(model, src_train_loader, target_train_loader, args.domain_adaptation, args.sampling_strategy)
score_dict.update(score)
# Choose samples with highest uncertainty and diversity for active learning
sample_weights_sorted = sorted(sample_weights, key=sample_weights.get)
aada_idx = [*sample_weights_sorted][:n_samples]
else:
# train
score, _ = model.train_on_loader(model, src_train_loader, target_train_loader, args.domain_adaptation, args.sampling_strategy)
score_dict.update(score)
# Add score_dict to score_list
score_list += [score_dict]
score, losses = model.val_on_loader(target_val_loader)
score_dict.update(score)
# visualize
vis_loader_val = DataLoader(target_val_set, sampler=ut.SubsetSampler(target_val_set, indices=[0, 2, 4, 10, 12, 25]),
batch_size=1)
model.vis_on_loader(vis_loader_val, savedir=os.path.join(savedir, "val_images"))
# Train on entire source dataset (base case)
else:
train_loader = DataLoader(train_set, shuffle=False, batch_size=exp_dict["batch_size"])
val_loader = DataLoader(val_set, shuffle=False, batch_size=exp_dict["batch_size"])
# train
score, _ = model.train_on_loader(model, train_loader, train_loader, args.domain_adaptation, args.sampling_strategy)
score_dict.update(score)
# Add score_dict to score_list
score_list += [score_dict]
# validate
score, losses = model.val_on_loader(val_loader)
score_dict.update(score)
# visualize on validation set
vis_loader_val = DataLoader(val_set, sampler=ut.SubsetSampler(val_set, indices=[0, 2, 4, 10, 12, 25]),
batch_size=1)
model.vis_on_loader(vis_loader_val, savedir=os.path.join(savedir, "val_images"))
# visualize on test set
vis_loader_test = DataLoader(test_set, sampler=ut.SubsetSampler(test_set, indices=[0, 10, 12, 13, 15]),
batch_size=1)
model.vis_on_loader_no_gt_mask(vis_loader_test, savedir=os.path.join(savedir, "test_images"))
# Report and save
print(pd.DataFrame(score_list).tail())
hu.save_pkl(score_list_path, score_list)
hu.torch_save(model_path, model.state_dict())
hu.torch_save(opt_path, opt.state_dict())
print("Saved in %s" % savedir)
# Active learning + domain adaptation
if args.domain_adaptation and sampling_strategy != "None":
# Domain shift dataset
if args.domain_shift:
unlabeled_idx = list(range(len(target_train_set)))
if sampling_strategy == "random":
rand_idx = rand_state.choice(unlabeled_idx, n_samples, replace=False)
print(rand_idx)
target_train_loader = DataLoader(target_train_set,
sampler=ut.SubsetSampler(target_train_set, indices=rand_idx),
batch_size=exp_dict["batch_size"])
elif sampling_strategy == "learned_loss":
# Set labels for active learning once
target_train_loader = DataLoader(target_train_set,
sampler=ut.SubsetSampler(target_train_set, indices=unlabeled_idx),
batch_size=exp_dict["batch_size"])
with torch.no_grad():
score, losses = model.val_on_loader(target_train_loader)
losses = np.array(losses)
idx = losses.argsort()[-n_samples:][::-1]
new_labeled_idx = []
for id in idx:
new_labeled_idx.append(unlabeled_idx[id])
print(new_labeled_idx)
target_train_loader = DataLoader(target_train_set,
sampler=ut.SubsetSampler(target_train_set, indices=new_labeled_idx),
batch_size=exp_dict["batch_size"])
elif sampling_strategy == "aada":
# Set labels for active learning once
target_train_loader = DataLoader(target_train_set,
sampler=ut.SubsetSampler(target_train_set, indices=aada_idx),
batch_size=exp_dict["batch_size"])
for epoch in range(s_epoch, args.n_epochs):
# train
score, _ = model.train_on_loader(model, target_train_loader, target_val_loader, 0, args.sampling_strategy)
score_dict.update(score)
# Add score_dict to score_list
score_list += [score_dict]
# validate
score, losses = model.val_on_loader(target_val_loader)
score_dict.update(score)
# visualize on validation set
vis_loader_val = DataLoader(target_val_set, sampler=ut.SubsetSampler(target_val_set, indices=[0, 2, 4, 10, 12, 25]),
batch_size=1)
model.vis_on_loader(vis_loader_val, savedir=os.path.join(savedir, "val_images"))
# Base case dataset
else:
unlabeled_idx = list(range(len(train_set)))
if sampling_strategy == "random":
rand_idx = rand_state.choice(unlabeled_idx, n_samples, replace=False)
print(rand_idx)
train_loader = DataLoader(train_set,
sampler=ut.SubsetSampler(train_set, indices=rand_idx),
batch_size=exp_dict["batch_size"])
elif sampling_strategy == "learned_loss":
# Set labels for active learning
train_loader = DataLoader(train_set,
sampler=ut.SubsetSampler(train_set, indices=unlabeled_idx),
batch_size=exp_dict["batch_size"])
with torch.no_grad():
score, losses = model.val_on_loader(train_loader)
losses = np.array(losses)
idx = losses.argsort()[-n_samples:][::-1]
new_labeled_idx = []
for id in idx:
new_labeled_idx.append(unlabeled_idx[id])
print(new_labeled_idx)
train_loader = DataLoader(train_set,
sampler=ut.SubsetSampler(train_set, indices=new_labeled_idx),
batch_size=exp_dict["batch_size"])
for epoch in range(s_epoch, args.n_epochs):
# train
score, _ = model.train_on_loader(model, train_loader, val_loader, 0, args.sampling_strategy)
score_dict.update(score)
# Add score_dict to score_list
score_list += [score_dict]
# validate
score, losses = model.val_on_loader(val_loader)
score_dict.update(score)
# visualize on validation set
vis_loader_val = DataLoader(val_set, sampler=ut.SubsetSampler(val_set, indices=[0, 2, 4, 10, 12, 25]),
batch_size=1)
model.vis_on_loader(vis_loader_val, savedir=os.path.join(savedir, "val_images"))
# visualize on test set
vis_loader_test = DataLoader(test_set, sampler=ut.SubsetSampler(test_set, indices=[0, 10, 12, 13, 15]),
batch_size=1)
model.vis_on_loader_no_gt_mask(vis_loader_test, savedir=os.path.join(savedir, "test_images"))
# Report and save
print(pd.DataFrame(score_list).tail())
hu.save_pkl(score_list_path, score_list)
hu.torch_save(model_path, model.state_dict())
hu.torch_save(opt_path, opt.state_dict())
print("Saved in %s" % savedir)
# Fine-tune on target
elif args.domain_adaptation == 0 and args.domain_shift and sampling_strategy != "None":
if sampling_strategy == "random":
print(rand_idx)
target_train_loader = DataLoader(target_train_set,
sampler=ut.SubsetSampler(target_train_set, indices=rand_idx),
batch_size=exp_dict["batch_size"])
elif sampling_strategy == "learned_loss":
# Set labels for active learning once
target_train_loader = DataLoader(target_train_set,
sampler=ut.SubsetSampler(target_train_set, indices=unlabeled_idx),
batch_size=exp_dict["batch_size"])
with torch.no_grad():
score, losses = model.val_on_loader(target_train_loader)
losses = np.array(losses)
idx = losses.argsort()[-n_samples:][::-1]
new_labeled_idx = []
for id in idx:
new_labeled_idx.append(unlabeled_idx[id])
new_labeled_idx.extend(rand_idx)
print(new_labeled_idx)
target_train_loader = DataLoader(target_train_set,
sampler=ut.SubsetSampler(target_train_set, indices=new_labeled_idx),
batch_size=exp_dict["batch_size"])
target_val_loader = DataLoader(target_val_set, shuffle=False, batch_size=exp_dict["batch_size"])
for epoch in range(s_epoch, args.n_epochs):
# train
score, _ = model.train_on_loader(model, target_train_loader, target_val_loader, 0, args.sampling_strategy)
score_dict.update(score)
# Add score_dict to score_list
score_list += [score_dict]
# validate
score, losses = model.val_on_loader(target_val_loader)
score_dict.update(score)
# visualize on validation set
vis_loader_val = DataLoader(target_val_set, sampler=ut.SubsetSampler(target_val_set, indices=[0, 2, 4, 10, 12, 25]),
batch_size=1)
model.vis_on_loader(vis_loader_val, savedir=os.path.join(savedir, "val_images"))
# visualize on test set
vis_loader_test = DataLoader(test_set, sampler=ut.SubsetSampler(test_set, indices=[0, 10, 12, 13, 15]),
batch_size=1)
model.vis_on_loader_no_gt_mask(vis_loader_test, savedir=os.path.join(savedir, "test_images"))
# Report and save
print(pd.DataFrame(score_list).tail())
hu.save_pkl(score_list_path, score_list)
hu.torch_save(model_path, model.state_dict())
hu.torch_save(opt_path, opt.state_dict())
print("Saved in %s" % savedir)
if __name__ == '__main__':
    # Define the list of experiments to run.
    import exp_configs

    parser = argparse.ArgumentParser()
    parser.add_argument('-e', '--exp_group_list', nargs="+",
                        help='Define which exp groups to run.')
    parser.add_argument('-sb', '--savedir_base', default=None,
                        help='Define the base directory where the experiments will be saved.')
    parser.add_argument('-d', '--datadir', default=None,
                        help='Define the dataset directory.')
    parser.add_argument("-r", "--reset", default=0, type=int,
                        help='Reset or resume the experiment.')
    parser.add_argument("--debug", default=False, type=int,
                        help='Debug mode.')
    parser.add_argument("-ei", "--exp_id", default=None,
                        help='Run a specific experiment based on its id.')
    parser.add_argument("-j", "--run_jobs", default=0, type=int,
                        help='Run the experiments as jobs in the cluster.')
    parser.add_argument("-nw", "--num_workers", type=int, default=0,
                        help='Specify the number of workers in the dataloader.')
    parser.add_argument("-v", "--visualize_notebook", type=str, default='',
                        help='Create a jupyter file to visualize the results.')
    parser.add_argument("-uc", "--use_cuda", type=int, default=1)
    parser.add_argument("-da", "--domain_adaptation", type=int, default=0)
    parser.add_argument("-ss", "--sampling_strategy", type=str, default='None')
    parser.add_argument("-ds", "--domain_shift", type=int, default=0)
    parser.add_argument("-n", "--n_samples", type=int, default=310)
    parser.add_argument("-ne", "--n_epochs", type=int, default=5)
    args, others = parser.parse_known_args()

    # Launch experiments using the haven wizard.
    # (A stray dataset marker at the end of this line previously made the
    # file unparsable.)
    hw.run_wizard(func=trainval, exp_groups=exp_configs.EXP_GROUPS, args=args)
import pandas as pd
from pandas import DataFrame
from sklearn import linear_model
import statsmodels.api as sm

# Load the training data and keep only the modelling columns.
data = pd.read_csv('train_data/simple.csv')
df = DataFrame(data, columns=['p', 'prev_price', 't', 'x1', 'x2'])

X = df[['x1', 'x2']]
# The original selected df[''] (an empty column name), which raises KeyError.
# 'p' is presumably the regression target — TODO confirm against the CSV.
Y = df['p']

print(df)
3357055 | <gh_stars>1-10
"""Miscellaneous IO related functions.."""
def dag_to_str(dag):
    """Serialize a DAG to its '|'-separated string form.

    Each family is rendered as the node id followed by its sorted parent
    ids, separated by spaces; families are joined with '|'.
    """
    parts = []
    for family in dag:
        if len(family) == 1:
            parts.append(str(family[0]))
        else:
            tokens = [str(family[0])]
            tokens.extend(str(parent) for parent in sorted(family[1]))
            parts.append(" ".join(tokens))
    return "|".join(parts)
def str_to_dag(dag_str):
    """Parse a '|'-separated DAG string back into (node, parent_set) pairs.

    Inverse of dag_to_str; a family with no parents yields an empty set.
    """
    families = []
    for family_str in dag_str.split("|"):
        tokens = family_str.split()
        node = int(tokens[0])
        parents = set(map(int, tokens[1:]))
        families.append((node, parents))
    return families
def read_candidates(candidate_path):
    """Read parent candidates from file.

    Row number identifies the node and space separated numbers on each row
    identify the candidate parents.
    """
    candidates = {}
    with open(candidate_path, "r") as handle:
        for node, line in enumerate(handle):
            candidates[node] = tuple(int(token) for token in line.split())
    return candidates
def read_jkl(scorepath):
    """Read local scores from a jkl file.

    Format: first line is the number of variables; each variable block
    starts with "<var> <n_scores>" followed by n_scores rows of
    "<score> <n_parents> [parent ...]".

    Returns a dict mapping variable -> {frozenset(parents): score}.
    (Fixes: removed the duplicate `scores = dict()` initialization, the
    double split of header rows, and the redundant re-wrapping of an
    already-frozen parent set in `frozenset(...)`.)
    """
    scores = {}
    with open(scorepath, "r") as jkl_file:
        rows = jkl_file.readlines()
    n_scores = 0
    current_var = None
    for row in rows[1:]:
        if not n_scores:
            # Header row of the next variable block.
            fields = row.strip().split()
            current_var = int(fields[0])
            n_scores = int(fields[1])
            scores[current_var] = {}
            continue
        row_list = row.strip().split()
        score = float(row_list[0])
        n_parents = int(row_list[1])
        parents = frozenset()
        if n_parents > 0:
            parents = frozenset(int(x) for x in row_list[2:])
        scores[current_var][parents] = score
        n_scores -= 1
    return scores
def write_jkl(scores, fpath):
    """Write local scores to *fpath* in jkl format.

    Assumes the parent sets are iterables, not bitmaps.
    """
    out = ["%d\n" % len(scores)]
    for var in sorted(scores):
        psets = scores[var]
        out.append("%d %d\n" % (var, len(psets)))
        # Shorter parent sets first, matching the reader's expectations.
        for pset in sorted(psets, key=len):
            members = " ".join(str(parent) for parent in pset)
            out.append("%s %d %s\n" % (psets[pset], len(pset), members))
    with open(fpath, "w") as handle:
        handle.writelines(out)
| StarcoderdataPython |
3339504 | import sqlalchemy
import sqlalchemy.orm
from LanguageDeck.models import associations as assoc
from LanguageDeck.models import Base, decks
import datetime
# LanguageB_Example: sentences featuring language B.
# Contains the following relationships
# * many-to-many: LanguageB_Vocab
# * many-to-many: LanguageA_Example
# * many-to-many association: User (record of scores)
class LanguageBExample(Base):
    """Example sentences in language B.

    Relationships:
      * many-to-many: LanguageAExample (translations)
      * many-to-many: LanguageBVocab (words)
      * one-to-many association rows: per-user scores
    """
    __tablename__ = "lang_b_examples"

    id = sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True)
    translations = sqlalchemy.orm.relation("LanguageAExample", secondary=assoc.LAE_LBE,
                                           back_populates="translations")
    words = sqlalchemy.orm.relation("LanguageBVocab", secondary=assoc.LBV_LBE,
                                    back_populates="examples")
    scores = sqlalchemy.orm.relation(assoc.LanguageBExampleScore, cascade="all")
    # Pass the callable (not its result) so the default date is evaluated
    # per insert, not once at import time.
    date_created = sqlalchemy.Column("date_created", sqlalchemy.Date, default=datetime.date.today, nullable=False)
    date_last_touched = sqlalchemy.Column("datetime_touched", sqlalchemy.DateTime)
    text = sqlalchemy.Column("example", sqlalchemy.Text, nullable=False)

    score_type = assoc.LanguageBExampleScore
# LanguageAExample: Sentences featuring language A. Intended for idioms
# Contains the following relationships
# * many-to-many: LanguageBExample
class LanguageAExample(Base):
    """Example sentences in language A (intended for idioms).

    Relationships:
      * many-to-many: LanguageBExample (translations)
      * many-to-many: LanguageAVocab (words)
    """
    __tablename__ = "lang_a_examples"

    id = sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True)
    translations = sqlalchemy.orm.relation("LanguageBExample", secondary=assoc.LAE_LBE,
                                           back_populates="translations")
    # Pass the callable (not its result) so the default date is evaluated
    # per insert, not once at import time.
    date_created = sqlalchemy.Column("date_created", sqlalchemy.Date, default=datetime.date.today, nullable=False)
    date_last_touched = sqlalchemy.Column("datetime_touched", sqlalchemy.DateTime)
    text = sqlalchemy.Column("example", sqlalchemy.Text, nullable=False)
    words = sqlalchemy.orm.relation("LanguageAVocab", secondary=assoc.LAV_LAE,
                                    back_populates="examples")
# Language A vocab deck. In our case, LanguageA is English
class LanguageAVocab(Base):
    """Language A vocab card (English in our case)."""
    __tablename__ = "lang_a_vocab"

    id = sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True)
    text = sqlalchemy.Column("word", sqlalchemy.String, nullable=False)
    g_type = sqlalchemy.Column("type", sqlalchemy.String)
    # Pass the callable (not its result) so the default date is evaluated
    # per insert, not once at import time.
    date_created = sqlalchemy.Column("date_created", sqlalchemy.Date, default=datetime.date.today, nullable=False)
    date_last_touched = sqlalchemy.Column("date_touched", sqlalchemy.Date)
    translations = sqlalchemy.orm.relation("LanguageBVocab", secondary=assoc.LAV_LBV,
                                           back_populates="translations")
    examples = sqlalchemy.orm.relation("LanguageAExample", secondary=assoc.LAV_LAE,
                                       back_populates="words")
    scores = sqlalchemy.orm.relation(assoc.LanguageAVocabScore, cascade="all, delete")

    score_type = assoc.LanguageAVocabScore
    example_type = LanguageAExample
# LanguageB_Vocab cards. In our case, LanguageB is German
# There the following relationships between with LanguageB_Vocab:
# * many-to-many: LanguageA_Vocab
# * many-to-many: LanguageB_Examples
# * many-to-many association: User (record of scores)
# * one-to-many: grammar
class LanguageBVocab(Base):
    """Language B vocab card (German in our case).

    Relationships:
      * many-to-many: LanguageAVocab (translations)
      * many-to-many: LanguageBExample (examples)
      * one-to-many: Grammar
      * one-to-many association rows: per-user scores
    """
    __tablename__ = "lang_b_vocab"

    id = sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True)
    translations = sqlalchemy.orm.relation("LanguageAVocab", secondary=assoc.LAV_LBV,
                                           back_populates="translations")
    examples = sqlalchemy.orm.relation("LanguageBExample", secondary=assoc.LBV_LBE,
                                       back_populates="words")
    g_type = sqlalchemy.Column("type", sqlalchemy.Text)
    scores = sqlalchemy.orm.relation(assoc.LanguageBVocabScore, cascade="all")
    grammar = sqlalchemy.orm.relation("Grammar", cascade="all")
    # Pass the callable (not its result) so the default date is evaluated
    # per insert, not once at import time.
    date_created = sqlalchemy.Column("date_created", sqlalchemy.Date, default=datetime.date.today, nullable=False)
    date_last_touched = sqlalchemy.Column("datetime_touched", sqlalchemy.DateTime)
    text = sqlalchemy.Column("word", sqlalchemy.Text, nullable=False)

    score_type = assoc.LanguageBVocabScore
    example_type = LanguageBExample
# Grammar object: Specifies grammatical details of a given word.
# Contains the following relationships
# * many-to-one: LanguageBVocab
class Grammar(Base):
    """Grammatical details of a word; many-to-one back to LanguageBVocab."""
    __tablename__ = "grammar"

    id = sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True)
    # Foreign key to the owning language-B vocab card.
    word = sqlalchemy.Column("word_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("lang_b_vocab.id"))
    paradigm = sqlalchemy.Column("paradigm", sqlalchemy.Text)
    irregular = sqlalchemy.Column("irregular", sqlalchemy.Boolean)
# User
class User(Base):
    """A deck owner; holds per-user vocab scores and owned decks."""
    __tablename__ = "users"

    id = sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True)
    vocab_a_scores = sqlalchemy.orm.relation(assoc.LanguageAVocabScore, cascade="all")
    decks = sqlalchemy.orm.relation("Deck", back_populates="user", cascade="all")
    name = sqlalchemy.Column("user_name", sqlalchemy.Text)
# Lookup table mapping each card type to its translation counterpart type
# (vocab <-> vocab, example <-> example, across the two languages).
translation_types = {LanguageAVocab: LanguageBVocab,
                     LanguageBVocab: LanguageAVocab,
                     LanguageAExample: LanguageBExample,
                     LanguageBExample: LanguageAExample}
| StarcoderdataPython |
351284 | <gh_stars>0
# coding=utf-8
import os
import shutil
import sys
import time
import math
import cv2
import numpy as np
import tensorflow as tf
import pyarabic.araby as araby
import string
from keras.callbacks import ModelCheckpoint
from keras.utils import to_categorical
import keras.backend as K
from keras.models import load_model
###############################################################
import random
import matplotlib.pyplot as plt
from skimage import io
from scipy.ndimage import interpolation as inter
from typing import Tuple, Union
from skimage.transform import rotate
from skimage.color import rgb2gray
from skimage import data
import skimage.filters as filters
##################################################################
sys.path.append(os.getcwd())
from nets import model_train as model
from utils.rpn_msr.proposal_layer import proposal_layer
from utils.text_connector.detectors import TextDetector
# Command-line flags: input image dir, output dir, GPU id, checkpoint dir.
tf.app.flags.DEFINE_string('test_data_path', 'data/demo/', '')
tf.app.flags.DEFINE_string('output_path', 'data/res/', '')
tf.app.flags.DEFINE_string('gpu', '0', '')
tf.app.flags.DEFINE_string('checkpoint_path', 'checkpoints_mlt/', '')
FLAGS = tf.app.flags.FLAGS
def get_images():
    """Collect every image file path under FLAGS.test_data_path."""
    exts = ('jpg', 'png', 'jpeg', 'JPG', 'tiff')
    files = []
    for parent, dirnames, filenames in os.walk(FLAGS.test_data_path):
        files.extend(os.path.join(parent, filename)
                     for filename in filenames
                     if filename.endswith(exts))
    print('Find {} images'.format(len(files)))
    return files
def resize_image(img):
    """Resize so the short side is ~600 px (long side capped at 1200) and
    both dimensions are multiples of 16, as the detector requires.

    Returns (resized_image, (h_ratio, w_ratio)) with ratios new/original.
    """
    img_size = img.shape
    im_size_min = np.min(img_size[0:2])
    im_size_max = np.max(img_size[0:2])

    im_scale = float(600) / float(im_size_min)
    if np.round(im_scale * im_size_max) > 1200:
        im_scale = float(1200) / float(im_size_max)
    new_h = int(img_size[0] * im_scale)
    new_w = int(img_size[1] * im_scale)

    # Round *up* to the next multiple of 16, keeping exact multiples as-is.
    # (The original tested `new_h // 16 == 0`, which only holds for sizes
    # below 16, so dimensions already divisible by 16 were needlessly
    # enlarged by 16.)
    new_h = new_h if new_h % 16 == 0 else (new_h // 16 + 1) * 16
    new_w = new_w if new_w % 16 == 0 else (new_w // 16 + 1) * 16

    re_im = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
    return re_im, (new_h / img_size[0], new_w / img_size[1])
def show_img(img, title="test"):
    """Show *img* in a resizable window; blocks until any key is pressed."""
    cv2.namedWindow(title,cv2.WINDOW_NORMAL)
    cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
def deskew_image(img, boxes):
    """Rotate *img* by the mean skew angle of the detected text boxes.

    Returns the rotated image; the input is returned unchanged when no
    boxes are given or the warp fails.
    """
    if len(boxes) == 0:
        # Nothing to measure skew from; the original raised
        # ZeroDivisionError on the mean computation in this case.
        return img

    angle_acc = 0
    for box in boxes:
        pts = box[:8].astype(np.int32).reshape((-1, 1, 2))
        rect = cv2.minAreaRect(pts)
        angle = rect[-1]
        # minAreaRect reports angles in [-90, 0); normalize so the result
        # is the small correction rotation around horizontal.
        if angle < -45:
            angle = -(90 + angle)
        else:
            angle = -angle
        angle_acc += angle
    angle_acc /= len(boxes)

    (h, w) = img.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, -angle_acc, 1.0)
    try:
        img = cv2.warpAffine(img, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
    except cv2.error:
        # Best effort: keep the unrotated image if warping fails.
        pass
    return img
def crop_image(img, boxes, write_image=True, verbose=False):
    """Crop *img* to the bounding rectangle of all detected boxes.

    Returns (cropped_image, corner_points) where corner_points is the int32
    rectangle as [[x1, y1], ..., [x4, y4]].

    NOTE(review): `write_image` and `verbose` are kept for backward
    compatibility but unused — the original image-writing code sat *after*
    the return statement (and referenced an undefined `img_name`), so it
    could never execute; that dead code has been removed.
    """
    # Sort boxes ascending (topmost point, then leftmost point).
    boxes = np.array(sorted(boxes, key=lambda k: [k[1], k[0]]))

    # Extreme coordinates of the union of all boxes; max(0, ...) clamps
    # away negative (out-of-image) coordinates.
    leftmost = max(0, min([min(boxes[:, 0]), min(boxes[:, 6])]))
    rightmost = max([max(boxes[:, 2]), max(boxes[:, 4])])
    topmost = max(0, min([min(boxes[:, 1]), min(boxes[:, 3])]))
    bottommost = max([max(boxes[:, 5]), max(boxes[:, 7])])

    # Reshape interest points to [(x1, y1), (x2, y2), (x3, y3), (x4, y4)].
    pts = np.array([leftmost, topmost, rightmost, topmost,
                    rightmost, bottommost, leftmost, bottommost])\
        .astype(np.int32).reshape((-1, 2))

    # Bounding rectangle of the receipt.
    rect = cv2.boundingRect(pts)
    x, y, w, h = rect
    cropped = img[y:y + h, x:x + w]
    return cropped, pts
def detect_text(img, sess,bbox_pred, cls_pred, cls_prob,input_image,input_im_info,mode='O'):
    """Run the CTPN text detector on *img*.

    Returns (img, boxes) where boxes is an int array of detected text-line
    quadrilaterals. *mode* is forwarded to TextDetector as DETECT_MODE
    ('O' oriented, 'H' horizontal).
    """
    start = time.time()
    h, w, c = img.shape
    im_info = np.array([h, w, c]).reshape([1, 3])
    # Forward pass: raw box regressions and text/non-text probabilities.
    bbox_pred_val, cls_prob_val = sess.run([bbox_pred, cls_prob],
                                           feed_dict={input_image: [img],
                                                      input_im_info: im_info})

    # Turn the network output into scored text-segment proposals.
    textsegs, _ = proposal_layer(cls_prob_val, bbox_pred_val, im_info)
    scores = textsegs[:, 0]
    textsegs = textsegs[:, 1:5]

    # Link segments into complete text lines.
    textdetector = TextDetector(DETECT_MODE=mode)
    boxes = textdetector.detect(textsegs, scores[:, np.newaxis], img.shape[:2])
    boxes = np.array(boxes, dtype=np.int)
    cost_time = (time.time() - start)
    print("cost time: {:.2f}s".format(cost_time))
    return img, boxes
def merge_boxes(boxes):
    """Merge boxes that lie on the same text line and stretch each surviving
    box to the full receipt width.

    Two boxes are considered the same line when either top-corner height
    differs by less than a fixed threshold. Returns the merged boxes sorted
    top-to-bottom, left-to-right. Boxes are modified in place.
    """
    # img = orig_image.copy()
    # Extract interset points to crop receipt
    leftmost = max(0, min([ min(boxes[:,0]), min(boxes[:,6])]) ) # max(0,number) to avoid -1 returning
    rightmost = max([ max(boxes[:,2]), max(boxes[:,4])])
    topmost = max(0, min([ min(boxes[:,1]), min(boxes[:,3])]) ) # max(0,number) to avoid -1 returning
    bottommost = max([ max(boxes[:,5]), max(boxes[:,7])])

    threshold = 10
    merge_count = 0
    black_list = []
    new_boxes = []
    for i, box in enumerate(boxes):
        # Skip the merged boxes
        if i in black_list:
            continue
        pts = box[:8].astype(np.int32).reshape((-1, 1, 2))
        # cv2.polylines(img, [pts], True, color=(0, 0, 255), thickness=2)
        # show_img(img)
        # Loop on all boxes after current box
        for idx in range(i+1, len(boxes)):
            # Skip the merged boxes
            if idx in black_list:
                continue
            # Set temp_box as the next box
            tmp_box = boxes[idx]
            # Check if Height difference - of one of two corners - less than threshold (i.e the same line)
            if abs(tmp_box[1] - box[1]) < threshold or abs(tmp_box[3] - box[3]) < threshold:
                black_list.append(idx)
                # count how many boxes are merged
                merge_count = merge_count + 1
                # stretch the original width box to cover the two boxes (Consider stretching from LTR or RTL)
                if box[0] >= tmp_box[2]:
                    box[0] = tmp_box[0]
                    box[6] = tmp_box[6]
                elif box[2] <= tmp_box[0]:
                    box[2] = tmp_box[2]
                    box[4] = tmp_box[4]
                # selecet the largest height and set the original box to the larger one (to avoid clipping)
                max_height_left_corner = np.min( [box[1], box[3], tmp_box[1],tmp_box[3]])
                box[1] = box[3] = max_height_left_corner
                # selecet the largest lower height and set the original box to the larger one (to avoid clipping)
                max_height_right_corner =np.max( [box[5], box[7], tmp_box[5],tmp_box[7]] )
                box[5] = box[7] = max_height_right_corner
        # Stretch every kept box to the full receipt width.
        box[0] = box[6] = leftmost
        box[2] = box[4] = rightmost
        new_boxes.append(box)
        pts = box[:8].astype(np.int32).reshape((-1, 1, 2))
    new_boxes = np.array(sorted(new_boxes , key=lambda k: [k[1], k[0]]))
    return new_boxes
def sub_line_equation(x1, y1, x2, y2, x=None, y=None):
    """Evaluate the line through (x1, y1) and (x2, y2).

    Exactly one of *x*, *y* must be supplied: given *x*, returns the y on
    the line; given *y*, returns the x. Raises ValueError otherwise (the
    original fell through to a NameError on undefined locals when both were
    supplied). Assumes a non-vertical line, i.e. x1 != x2.
    """
    m = (y1 - y2) / (x1 - x2)
    if y is None and x is not None:
        return m * (x - x1) + y1
    if x is None and y is not None:
        return ((y - y1) / m) + x1
    raise ValueError("provide exactly one of x or y")
def get_relative_distance(orig_pts, boxes):
    """Offset *boxes* (cropped-image coordinates) back toward the original
    image frame using the crop rectangle *orig_pts*; boxes are modified in
    place and returned.

    NOTE(review): offsets come from line equations along the rectangle's
    top and left edges via sub_line_equation. Assumes orig_pts rows are
    [top-left, top-right, bottom-right, bottom-left] — TODO confirm against
    crop_image's output ordering.
    """
    # line1: top edge (y offsets); line2: left edge (x offsets).
    line1 = np.reshape([orig_pts[0] , orig_pts[1]], -1)
    line2 = np.reshape([orig_pts[0] , orig_pts[3]], -1)
    for idx, box in enumerate(boxes):
        box = box[:8].astype(np.int32).reshape((-1, 2))
        for i in range(0,8,2):
            # Even index = x coordinate, odd index = y coordinate.
            boxes[idx][i] = boxes[idx][i] + sub_line_equation(line2[0],line2[1],line2[2],line2[3], y=boxes[idx][i+1])
            boxes[idx][i+1] = boxes[idx][i+1] + sub_line_equation(line1[0],line1[1],line1[2],line1[3], x=boxes[idx][i])
    return boxes
def crop_boxes(img,boxes):
    """Cut one sub-image per detected box out of *img*.

    Each crop is individually deskewed; returns the list of line images.
    """
    lines = []
    for i, box in enumerate(boxes):
        pts = box[:8].astype(np.int32).reshape((-1, 1, 2))
        # Clamp negative coordinates to the image border.
        pts[pts<0] = 0
        line_rect = cv2.boundingRect(pts)
        x, y, w, h = line_rect
        croped_line = img[y:y+h, x:x+w].copy()
        croped_line = deskew_image(croped_line, [box])
        #croped_line = cv2.cvtColor(croped_line, cv2.COLOR_BGR2GRAY)
        #_,croped_line = cv2.threshold(croped_line, 0,255,cv2.THRESH_OTSU)
        #croped_line = cv2.cvtColor(croped_line,cv2.COLOR_GRAY2RGB)
        lines.append(croped_line)
    return lines
def stretch_boxes(input_img_shape, resized_image_shape, boxes):
    """Rescale detection boxes from resized-image coordinates back to the
    original image's coordinate frame (in place; also returned).

    Shapes are numpy-style (height, width[, channels]).
    """
    # shape[0] is the height and shape[1] the width. The original code had
    # the two swapped, so x coordinates were scaled by the height ratio and
    # y coordinates by the width ratio (latent because the two ratios are
    # usually nearly equal after resize_image).
    ratio_w = input_img_shape[1] / resized_image_shape[1]
    ratio_h = input_img_shape[0] / resized_image_shape[0]

    for box in boxes:
        # Even indices hold x coordinates, odd indices hold y coordinates.
        box[0] *= ratio_w
        box[2] *= ratio_w
        box[4] *= ratio_w
        box[6] *= ratio_w
        box[1] *= ratio_h
        box[3] *= ratio_h
        box[5] *= ratio_h
        box[7] *= ratio_h
    return boxes
def correct_skew(image, delta=1, limit=5):
    """Deskew *image* by brute-force search over angles in [-limit, limit]
    (step *delta*), scoring each candidate by the squared difference of
    adjacent row-sum histogram bins of the binarized image — sharper
    horizontal text lines give a higher score.
    """
    def determine_score(arr, angle):
        # Rotate the thresholded image and score row-projection contrast.
        data = inter.rotate(arr, angle, reshape=False, order=0)
        histogram = np.sum(data, axis=1)
        score = np.sum((histogram[1:] - histogram[:-1]) ** 2)
        return histogram, score

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

    scores = []
    angles = np.arange(-limit, limit + delta, delta)
    for angle in angles:
        histogram, score = determine_score(thresh, angle)
        scores.append(score)

    best_angle = angles[scores.index(max(scores))]

    (h, w) = image.shape[:2]
    center = (w // 2, h // 2)
    M = cv2.getRotationMatrix2D(center, best_angle, 1.0)
    rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, \
              borderMode=cv2.BORDER_REPLICATE)

    print("image skewed with {} degrees".format(best_angle))
    return rotated
def ocrline(line,model,letters):
    """OCR a single text-line image with the CRNN *model*.

    The line is grayscaled, resized to the fixed 432x32 network input,
    normalized to [0, 1], CTC-decoded, and mapped through the *letters*
    alphabet. Returns the decoded text.
    """
    line = cv2.cvtColor(line,cv2.COLOR_BGR2GRAY)
    line = cv2.resize(line, (432, 32))
    line = line/255.0
    # Add channel and batch dimensions: (1, 32, 432, 1).
    line = np.expand_dims(line, -1)
    line = np.expand_dims(line, axis=0)
    prediction = model.predict(line)
    # use CTC decoder (greedy best path); -1 entries are CTC blanks.
    out = K.get_value(K.ctc_decode(prediction, input_length=np.ones(prediction.shape[0])*prediction.shape[1],
                                   greedy=True)[0][0])
    # see the results
    i = 0
    text = ''
    for x in out:
        print("predicted text = ", end='')
        for p in x:
            if int(p) != -1:
                print(letters[int(p)], end='')
                text += letters[int(p)]
        print('\n')
        i += 1
    return text
def main(argv=None):
    """End-to-end demo: detect text lines in every image under
    FLAGS.test_data_path, crop/deskew them, and OCR each line into a
    per-image '*_ocr.txt' file under FLAGS.output_path.
    """
    # Start from a clean output directory.
    if os.path.exists(FLAGS.output_path):
        shutil.rmtree(FLAGS.output_path)
    os.makedirs(FLAGS.output_path)
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    with tf.get_default_graph().as_default():
        input_image = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='input_image')
        input_im_info = tf.placeholder(tf.float32, shape=[None, 3], name='input_im_info')
        global_step = tf.get_variable('global_step', [], initializer=tf.constant_initializer(0), trainable=False)
        bbox_pred, cls_pred, cls_prob = model.model(input_image)
        # Restore the exponential-moving-average shadow weights.
        variable_averages = tf.train.ExponentialMovingAverage(0.997, global_step)
        saver = tf.train.Saver(variable_averages.variables_to_restore())
        with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            ckpt_state = tf.train.get_checkpoint_state(FLAGS.checkpoint_path)
            model_path = os.path.join(FLAGS.checkpoint_path, os.path.basename(ckpt_state.model_checkpoint_path))
            print('Restore from {}'.format(model_path))
            saver.restore(sess, model_path)
            im_fn_list = get_images()
            # Alphabet: Arabic letters + printable ASCII + Arabic-Indic digits.
            letters = araby.LETTERS+string.printable+u'٠١٢٣٤٥٦٧٨٩'
            arch_path='nets/ocr/slim_test_model.h5'
            weights_path='checkpoints_mlt/ocr/slimCRNN--20--1.542.hdf5'
            ocr = load_model(arch_path)
            ocr.load_weights(weights_path)
            for im_fn in im_fn_list:
                img_name = os.path.basename(im_fn)
                print('===============')
                print(im_fn)
                try:
                    # cv2 loads BGR; reverse channels to RGB.
                    im = cv2.imread(im_fn)[:, :, ::-1]
                except:
                    print("Error reading image {}!".format(im_fn))
                    continue
                input_img = im
                # Resize image to be feed to the network
                resized_img, (rh, rw) = resize_image(input_img)
                #show_img(resized_img,title='original')
                # Detect text from the resized image
                img, boxes = detect_text(resized_img, sess,bbox_pred, cls_pred, cls_prob,input_image,input_im_info)
                # Rescale box size (in order to plot it on the orignal image not the resized version)
                boxes = stretch_boxes(input_img.shape,resized_img.shape, boxes)
                #crop receipt
                img, orig_pts = crop_image(input_img,boxes, False)
                #show_img(img,'cropped')
                #deskew image
                img=correct_skew(img)
                #show_img(img,title='after deskew')
                # Detect text again from the cropped and deskewed image
                img, (rh, rw) = resize_image(img)
                img, boxes = detect_text(img, sess,bbox_pred, cls_pred, cls_prob,input_image,input_im_info, mode='H')
                mergboxes=merge_boxes(boxes)
                lines=crop_boxes(img,mergboxes)
                for line in lines:
                    line=correct_skew(line)
                    #show_img(line,title='line')
                    # ocr the line
                    with open(FLAGS.output_path+im_fn.split('/')[2].split('.')[0]+'_ocr.txt',mode='a',encoding='utf-8') as res:
                        res.writelines(ocrline(line,ocr,letters))
                        res.write('\n')
if __name__ == '__main__':
    # tf.app.run() parses the command-line flags and then calls main(argv).
    tf.app.run()
| StarcoderdataPython |
1773119 | <gh_stars>0
import logging
from pathvalidate import sanitize_filename
from pathlib import Path
from .utils import validate_url, write_details
from .utils import download as download_file
from .errors import InvalidURL
from .fetcher import *
from .manga import Manga
from .chapter import Chapter
from .downloader import ChapterPageDownloader
from .network import Net
log = logging.getLogger(__name__)
__all__ = (
'download', 'login', 'logout'
)
def login(*args, **kwargs):
    """Login to MangaDex

    Do not worry about the token session, the library automatically handles
    this. The login session will be automatically renewed (unless you called
    :meth:`logout()`).

    Parameters
    -----------
    password: :class:`str`
        Password to login
    username: Optional[:class:`str`]
        Username to login
    email: Optional[:class:`str`]
        Email to login

    Raises
    -------
    AlreadyLoggedIn
        User is already logged in
    ValueError
        Parameters are not valid
    LoginFailed
        Login credentials are not valid
    """
    Net.requests.login(*args, **kwargs)
def logout():
    """Logout from MangaDex

    Raises
    -------
    NotLoggedIn
        User is not logged in
    """
    Net.requests.logout()
def download(
    url,
    folder=None,
    replace=False,
    compressed_image=False,
    start_chapter=None,
    end_chapter=None,
    no_oneshot_chapter=False
):
    """Download a manga

    Parameters
    -----------
    url: :class:`str`
        A MangaDex URL or manga id
    folder: :class:`str` (default: ``None``)
        Store manga in given folder
    replace: :class:`bool` (default: ``False``)
        Replace manga if exist
    compressed_image: :class:`bool` (default: ``False``)
        Use compressed images for low size when downloading manga
    start_chapter: :class:`float` (default: ``None``)
        Start downloading manga from given chapter
    end_chapter: :class:`float` (default: ``None``)
        Stop downloading manga from given chapter
    no_oneshot_chapter: :class:`bool` (default: ``False``)
        If exist, don't download oneshot chapter

    Raises
    -------
    InvalidURL
        Not a valid MangaDex url
    InvalidManga
        Given manga cannot be found
    """
    # Validate start_chapter and end_chapter params. Plain ints (e.g.
    # chapter 5) are accepted and normalized to float; previously only
    # exact floats passed validation.
    if start_chapter is not None:
        if not isinstance(start_chapter, (int, float)):
            raise ValueError("start_chapter must be float, not %s" % type(start_chapter))
        start_chapter = float(start_chapter)
    if end_chapter is not None:
        if not isinstance(end_chapter, (int, float)):
            raise ValueError("end_chapter must be float, not %s" % type(end_chapter))
        end_chapter = float(end_chapter)

    log.debug('Validating the url...')
    try:
        manga_id = validate_url(url)
    except InvalidURL as e:
        log.error('%s is not valid mangadex url' % url)
        raise e from None

    # Begin fetching
    log.info('Fetching manga %s' % manga_id)
    data = get_manga(manga_id)

    # Resolve related entities (author, artist, cover) from relationships.
    rels = data['data']['relationships']
    author = None
    artist = None
    cover = None
    for rel in rels:
        _type = rel.get('type')
        _id = rel.get('id')
        if _type == 'author':
            author = _id
        elif _type == 'artist':
            artist = _id
        elif _type == 'cover_art':
            cover = _id

    log.debug('Getting author manga')
    data['author'] = get_author(author)
    log.debug('Getting artist manga')
    data['artist'] = get_author(artist)
    log.debug('Getting cover manga')
    data['cover_art'] = get_cover_art(cover)

    manga = Manga(data)

    # Destination: [folder/]<sanitized manga title>
    base_path = Path('.')
    if folder:
        base_path /= folder
    base_path /= sanitize_filename(manga.title)

    # Create folder
    log.debug("Creating folder for downloading")
    base_path.mkdir(parents=True, exist_ok=True)

    # Cover path
    cover_path = base_path / 'cover.jpg'
    log.info('Downloading cover manga %s' % manga.title)
    download_file(manga.cover_art, str(cover_path), replace=replace)

    # Write details.json for tachiyomi local manga
    details_path = base_path / 'details.json'
    log.info('Writing details.json')
    write_details(manga, details_path)

    # Fetching chapters
    chapters = Chapter(get_all_chapters(manga.id))

    # Begin downloading
    for vol, chap, images in chapters.iter_chapter_images(
        start_chapter,
        end_chapter,
        no_oneshot_chapter,
        compressed_image
    ):
        # Fetching chapter images
        log.info('Getting %s from chapter %s' % (
            'compressed images' if compressed_image else 'images',
            chap
        ))
        images.fetch()

        # Build the chapter folder name; the "none" markers come from the
        # API and identify oneshot chapters.
        chapter_folder = ""  # type: str
        if vol == 0 and chap == "none":
            chapter_folder += "Oneshot"
        elif vol == "none" and chap == "none":
            chapter_folder += "Oneshot"
        elif vol == "none" and chap == "0":
            chapter_folder += "Oneshot"
        else:
            if vol != 'none':
                chapter_folder += 'Volume. %s ' % vol
            chapter_folder += 'Chapter. ' + chap

        chapter_path = base_path / chapter_folder
        if not chapter_path.exists():
            chapter_path.mkdir(exist_ok=True)

        while True:
            error = False
            for page, img_url, img_name in images.iter():
                img_path = chapter_path / img_name
                log.info('Downloading %s page %s' % (chapter_folder, page))
                downloader = ChapterPageDownloader(
                    img_url,
                    img_path,
                    replace=replace
                )
                success = downloader.download()

                # One of the MangaDex network nodes is having a problem;
                # fetch a new one and restart the chapter download.
                if not success:
                    log.error('One of MangaDex network are having problem, re-fetching the images...')
                    log.info('Getting %s from chapter %s' % (
                        'compressed images' if compressed_image else 'images',
                        chap
                    ))
                    error = True
                    images.fetch()
                    break
                else:
                    continue
            if not error:
                break

    log.info("Download finished for manga \"%s\"" % manga.title)
    return manga
3566013 | <gh_stars>0
# Multiplication-table loop: prints the table of each entered value, ten
# rows at a time, until a negative number is entered.
print('\033[31m=-=\033[m' * 19)
print('\033[31mTABUADA DE QUALQUER VALOR [Valores negativos finalizam]\033[m')
print('\033[31m=-=\033[m' * 19)
c = 0
n = int(input('Deseja a tabuada de qual valor? '))
print('===' * 10)
while True:
    # A negative value ends the program.
    if n < 0:
        break
    c = c + 1
    operacao = n * c
    print(f'{n} x {c} = {operacao}')
    # After ten rows, reset the counter and prompt for the next value.
    if c > 9:
        c = 0
        print('===' * 10)
        n = int(input('Deseja a tabuada de qual valor? '))
        print('===' * 10)
print('Tabuada finalizada! Volte sempre!')
#ALTERNATIVA
#while True:
# n = int(input('Deseja a tabuada de qual valor? ')
# print('---' * 10)
# if n <= 0:
# break
# for c in range(1, 11):
# print(f'{c} x {n} = {c * n})
# print('---' * 10)
#print('Tabuada Finalizada!')
| StarcoderdataPython |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import requests
from requests.auth import HTTPBasicAuth
from xossynchronizer.steps.syncstep import SyncStep, DeferredException
from xossynchronizer.modelaccessor import model_accessor
from xossynchronizer.modelaccessor import ONOSApp, ServiceInstance, ServiceInstanceAttribute
from xosconfig import Config
from multistructlog import create_logger
from helpers import Helpers
log = create_logger(Config().get('logging'))
log.info("config file", file=Config().get_config_file())
class SyncONOSApp(SyncStep):
    """Sync step that drives ONOS applications through the ONOS REST API.

    Observes two model types:
      - ONOSApp: activates bundled apps, installs apps from a remote URL
        (verifying versions), and deactivates/uninstalls them on delete.
      - ServiceInstanceAttribute: pushes (and deletes) arbitrary JSON
        configuration to an ONOS REST endpoint when the attribute belongs
        to an ONOSApp service instance.
    """
    provides = [ONOSApp]
    observes = [ONOSApp, ServiceInstanceAttribute]

    def get_service_instance_attribute(self, o):
        """Return the attribute dict of the ServiceInstance wrapping `o`."""
        # NOTE this method is defined in the core convenience methods for service_instances
        svc = ServiceInstance.objects.get(id=o.id)
        return svc.serviceinstanceattribute_dict

    def check_app_dependencies(self, deps):
        """
        Check if all the dependencies required by this application are installed
        :param deps: comma separated list of application names
        :return: bool
        """
        if not deps:
            return True
        # strip whitespace and skip empty entries; the original `x is not ""`
        # identity test only happened to filter interned empty-string literals
        for dep in [x.strip() for x in deps.split(',') if x.strip()]:
            try:
                app = ONOSApp.objects.get(app_id=dep)
                if not app.backend_code == 1:
                    # backend_code == 1 means that the app has been pushed
                    return False
            except IndexError:
                # dependency not present in the data model at all
                # (fixed Python-2-only `except IndexError, e` syntax)
                return False
        return True

    def add_config(self, o):
        """Push a JSON config blob (ServiceInstanceAttribute) to ONOS.

        The attribute name is the REST path; the attribute value is the JSON
        payload. Raises on any non-200 response.
        """
        log.info("Adding config %s" % o.name, model=o.tologdict())
        # getting onos url and auth
        onos_url = "%s:%s" % (Helpers.format_url(o.service_instance.leaf_model.owner.leaf_model.rest_hostname), o.service_instance.leaf_model.owner.leaf_model.rest_port)
        onos_basic_auth = HTTPBasicAuth(o.service_instance.leaf_model.owner.leaf_model.rest_username, o.service_instance.leaf_model.owner.leaf_model.rest_password)

        # push configs (if any)
        url = o.name
        if url[0] == "/":
            # strip initial /
            url = url[1:]
        url = '%s/%s' % (onos_url, url)
        value = json.loads(o.value)
        request = requests.post(url, json=value, auth=onos_basic_auth)

        if request.status_code != 200:
            log.error("Request failed", response=request.text)
            raise Exception("Failed to add config %s in ONOS: %s" % (url, request.text))

    def activate_app(self, o, onos_url, onos_basic_auth):
        """Activate an app already bundled in ONOS and record its version on `o`."""
        log.info("Activating app %s" % o.app_id)
        url = '%s/onos/v1/applications/%s/active' % (onos_url, o.app_id)
        request = requests.post(url, auth=onos_basic_auth)

        if request.status_code != 200:
            log.error("Request failed", response=request.text)
            raise Exception("Failed to add application %s to ONOS: %s" % (url, request.text))

        # read the version back so the model reflects what is actually running
        url = '%s/onos/v1/applications/%s' % (onos_url, o.app_id)
        request = requests.get(url, auth=onos_basic_auth)

        if request.status_code != 200:
            log.error("Request failed", response=request.text)
            raise Exception("Failed to read application %s from ONOS: %s" % (url, request.text))
        else:
            o.version = request.json()["version"]

    def check_app_installed(self, o, onos_url, onos_basic_auth):
        """Return True when the app is installed at the requested version.

        If a different version is installed it is uninstalled first and
        False is returned so the caller reinstalls the right one.
        """
        log.debug("Checking if app is installed", app=o.app_id)
        url = '%s/onos/v1/applications/%s' % (onos_url, o.app_id)
        request = requests.get(url, auth=onos_basic_auth)

        if request.status_code == 200:
            if "version" in request.json() and o.version == request.json()["version"]:
                log.debug("App is installed", app=o.app_id)
                return True
            else:
                # uninstall the application
                self.uninstall_app(o, onos_url, onos_basic_auth)
                return False
        if request.status_code == 404:
            # app is not installed at all
            return False
        else:
            log.error("Request failed", response=request.text)
            # (fixed stray "aaa" in the original error message)
            raise Exception("Failed to read application %s from ONOS: %s" % (url, request.text))

    def install_app(self, o, onos_url, onos_basic_auth):
        """Install (and activate) an app from a remote URL, verifying the version."""
        log.info("Installing app from url %s" % o.url, app=o.app_id, version=o.version)

        # check if the already installed app is the correct version
        is_installed = self.check_app_installed(o, onos_url, onos_basic_auth)

        if is_installed:
            # if the app is already installed we don't need to do anything
            log.info("App is installed, skipping install", app=o.app_id)
            return

        data = {
            'activate': True,
            'url': o.url
        }
        url = '%s/onos/v1/applications' % onos_url
        request = requests.post(url, json=data, auth=onos_basic_auth)

        if request.status_code == 409:
            log.info("App was already installed", app=o.app_id, test=request.text)
            return

        if request.status_code != 200:
            log.error("Request failed", response=request.text)
            raise Exception("Failed to add application %s to ONOS: %s" % (url, request.text))
        log.debug("App from url %s installed" % o.url, app=o.app_id, version=o.version)

        # confirm that ONOS reports the version we asked for
        url = '%s/onos/v1/applications/%s' % (onos_url, o.app_id)
        request = requests.get(url, auth=onos_basic_auth)

        if request.status_code != 200:
            log.error("Request failed", response=request.text)
            raise Exception("Failed to read application %s from ONOS: %s while checking correct version" % (url, request.text))
        else:
            if o.version != request.json()["version"]:
                raise Exception("The version of %s you installed (%s) is not the same you requested (%s)" % (o.app_id, request.json()["version"], o.version))

    def sync_record(self, o):
        """Entry point: push a config attribute or install/activate an app."""
        log.info("Sync'ing", model=o.tologdict())
        if hasattr(o, 'service_instance'):
            # this is a ServiceInstanceAttribute model just push the config
            if 'ONOSApp' in o.service_instance.leaf_model.class_names:
                return self.add_config(o)
            return  # if it's not an ONOSApp do nothing

        if not self.check_app_dependencies(o.dependencies):
            raise DeferredException('Deferring installation of ONOSApp with id %s as dependencies are not met' % o.id)

        # getting onos url and auth
        onos_url = "%s:%s" % (Helpers.format_url(o.owner.leaf_model.rest_hostname), o.owner.leaf_model.rest_port)
        onos_basic_auth = HTTPBasicAuth(o.owner.leaf_model.rest_username, o.owner.leaf_model.rest_password)

        # activate app (bundled in onos)
        if not o.url or o.url is None:
            self.activate_app(o, onos_url, onos_basic_auth)

        # install an app from a remote source
        if o.url and o.url is not None:
            self.install_app(o, onos_url, onos_basic_auth)

    def delete_config(self, o):
        """Remove a previously pushed JSON config from ONOS (expects 204)."""
        log.info("Deleting config %s" % o.name)

        # getting onos url and auth
        onos_app = o.service_instance.leaf_model
        onos_url = "%s:%s" % (Helpers.format_url(onos_app.owner.leaf_model.rest_hostname), onos_app.owner.leaf_model.rest_port)
        onos_basic_auth = HTTPBasicAuth(onos_app.owner.leaf_model.rest_username, onos_app.owner.leaf_model.rest_password)

        url = o.name
        if url[0] == "/":
            # strip initial /
            url = url[1:]
        url = '%s/%s' % (onos_url, url)
        request = requests.delete(url, auth=onos_basic_auth)

        if request.status_code != 204:
            log.error("Request failed", response=request.text)
            raise Exception("Failed to remove config %s from ONOS: %s" % (url, request.text))

    def uninstall_app(self, o, onos_url, onos_basic_auth):
        """Completely remove an app from ONOS (expects 204 No Content)."""
        log.info("Uninstalling app %s" % o.app_id)
        url = '%s/onos/v1/applications/%s' % (onos_url, o.app_id)
        request = requests.delete(url, auth=onos_basic_auth)

        if request.status_code != 204:
            log.error("Request failed", response=request.text)
            raise Exception("Failed to delete application %s from ONOS: %s" % (url, request.text))

    def deactivate_app(self, o, onos_url, onos_basic_auth):
        """Deactivate (but keep installed) a bundled ONOS app."""
        log.info("Deactivating app %s" % o.app_id)
        url = '%s/onos/v1/applications/%s/active' % (onos_url, o.app_id)
        request = requests.delete(url, auth=onos_basic_auth)

        if request.status_code != 204:
            log.error("Request failed", response=request.text)
            raise Exception("Failed to deactivate application %s from ONOS: %s" % (url, request.text))

    def delete_record(self, o):
        """Entry point for deletions: remove configs or deactivate/uninstall apps."""
        if hasattr(o, 'service_instance'):
            # this is a ServiceInstanceAttribute model
            if 'ONOSApp' in o.service_instance.leaf_model.class_names:
                return self.delete_config(o)
            return  # if it's not related to an ONOSApp do nothing

        # NOTE if it is an ONOSApp we don't care about the ServiceInstanceAttribute
        # as the reaper will delete it

        # getting onos url and auth
        onos_url = "%s:%s" % (Helpers.format_url(o.owner.leaf_model.rest_hostname), o.owner.leaf_model.rest_port)
        onos_basic_auth = HTTPBasicAuth(o.owner.leaf_model.rest_username, o.owner.leaf_model.rest_password)

        # deactivate an app (bundled in onos)
        if not o.url or o.url is None:
            self.deactivate_app(o, onos_url, onos_basic_auth)

        # uninstall an app from a remote source, only if it has been activated before
        if o.url and o.url is not None:
            self.uninstall_app(o, onos_url, onos_basic_auth)
| StarcoderdataPython |
4878955 | #!/usr/bin/env python3
################################################################################
# Copyright (c) 2020, Arm Limited
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
import os
import logging
import silicon_libs.testchip as testchip
def bin_search(min_limit, max_limit, depth, func, params, is_int=False):
    """Implement generic binary search algorithm

    :param min_limit: The minimum range limit
    :type min_limit: int or float (must set is_int if integer)
    :param max_limit: The maximum range limit
    :type max_limit: int or float
    :param depth: The recursion depth limit
    :type depth: int
    :param func: Function for evaluating result. Takes in the new value to
                 set and a dictionary of parameters; must return a dict with
                 a 'bin_value' key (1 = search upper half, 0 = lower half).
    :type func: function
    :param params: Dictionary of params to pass to func, including a
                   logger object under the 'logger' key
    :param is_int: Set if the values to binary search are integers
    :type is_int: bool, optional
    :return: dict with 'results' (last func result), 'min' and 'max'
             (the final bracketing range), returned at convergence or
             when the depth budget is exhausted
    """
    new_val = (float(max_limit) + float(min_limit)) / 2.0
    params['logger'].info("-------------------------------")
    if is_int:
        new_val = int(new_val)
        params['logger'].info("min: {}, max: {}, mid: {}".format(
            min_limit, max_limit, new_val))
        # hex view of the same values (the original formatted mid with a
        # plain {:04}, omitting the 0x prefix and hex conversion)
        params['logger'].info("min: 0x{:04X}, max: 0x{:04X}, mid: 0x{:04X}".format(
            min_limit, max_limit, new_val))
    else:
        params['logger'].info("min: {:0.4f}, max: {:0.4f}, mid: {:0.4f}".format(
            min_limit, max_limit, new_val))
    results = func(new_val, params)
    depth -= 1
    if depth < 0:
        params['logger'].info("Maximum depth reached")
        return {'results': results, 'min': min_limit, 'max': max_limit}
    if min_limit == max_limit:
        params['logger'].info("Converged")
        return {'results': results, 'min': min_limit, 'max': max_limit}
    if results['bin_value'] == 1:
        # target is above the midpoint
        return bin_search(new_val, max_limit, depth, func, params, is_int=is_int)
    elif results['bin_value'] == 0:
        # target is below the midpoint
        return bin_search(min_limit, new_val, depth, func, params, is_int=is_int)
    else:
        raise ValueError("Invalid bin_value: {}".format(results['bin_value']))
def example_rtc_trim(chip, logger, time_period_s=10, max_iterations=10):
    """Example of trimming a chip's RTC using a linear search.

    Just an example; use the methods provided in the M0N0S2 class to trim
    the RTC in real code.

    :param chip: test-chip handle exposing a ``pcsm`` register interface
    :param logger: logger for progress output
    :param time_period_s: measurement window passed to the RTC measurement
    :param max_iterations: upper bound on trim iterations (the original never
        incremented ``iteration`` and never used this parameter, so the loop
        ran forever)
    """
    # PCSM cannot be read, so using model value (the PoR value unless changed
    # previously by this script).
    logger.info("Enable FBB (may already be set by embedded software anyway):")
    chip.pcsm.write('rtc_ctrl1', 1, bit_group='en_fbb')
    current_rtc_trim = chip.pcsm.read(
        'rtc_ctrl0', bit_group='trim_res_tune')
    logger.info("Starting RTC trim value: 0x{:08X}".format(current_rtc_trim))
    iteration = 0
    while iteration < max_iterations:
        current_rtc_trim = chip.pcsm.read(
            'rtc_ctrl0', bit_group='trim_res_tune')
        logger.info("Iteration: {:d}:: Trim: 0x{:08X}".format(iteration,
                    current_rtc_trim))
        # NOTE(review): `utils` is not imported in this module — confirm where
        # measure_rtc lives; as written this line raises NameError when reached.
        res = utils.measure_rtc(chip, logger, time_period_s)
        if (res['error_pc'] > 0):
            # RTC running fast: lower the trim
            logger.info("Decreasing trim...")
            chip.pcsm.write(
                'rtc_ctrl0',
                current_rtc_trim - 1,
                bit_group='trim_res_tune')
        else:
            # RTC running slow: raise the trim
            logger.info("Increasing trim...")
            chip.pcsm.write(
                'rtc_ctrl0',
                current_rtc_trim + 1,
                bit_group='trim_res_tune')
        iteration += 1
def derive_adp_port(logger):
    """Automatically derive the ADP port (only works on MacOS (perhaps Linux) and with the Demoboard

    :param logger: The logger object
    :type logger: logger.Logger object
    :return: The full ADP port path
    :rtype: str
    :raises IOError: if /dev is missing, no candidate port exists, or more
        than one candidate port is found
    """
    logger.warning("Auto ADP port - Mac (many linux) ONLY!")
    if not os.path.isdir('/dev'):
        raise IOError("Could not open /dev directory. This only works in Mac/Linux")
    # demoboard serial adapters enumerate as cu.usbserial-A* device nodes
    adp_ports = [x for x in os.listdir('/dev') if
                 x.startswith('cu.usbserial-A')]
    if len(adp_ports) < 1:
        # (removed a stray debug print of adp_ports to stdout)
        raise IOError("Could not find the ADP port")
    if len(adp_ports) > 1:
        raise IOError("Multiple possible ADP ports found: {}".format(
            adp_ports))
    return os.path.join('/dev', adp_ports[0])
def setup_logger(logger, level, log_filepath, prefix=None):
    """Sets up and returns a logger object

    :param logger: The logger object for logging messages to the console
                   and file
    :type logger: logging.Logger object
    :param level: The minimum log level to print to the console
    :type level: str (one of "DEBUG", "INFO", "WARN", "ERROR")
    :param log_filepath: Path to file to log messages to
    :type log_filepath: str
    :param prefix: Custom text to prefix to all logging output (i.e. to
                   distinguish it from other loggers when using multiple
                   loggers simultaneously).
    :type prefix: str, optional
    :return: The logger object for logging messages to the console and file
    :rtype: logging.Logger object
    """
    tag = "(" + str(prefix) + ") " if prefix else ""
    logger.setLevel(logging.getLevelName(level))

    # file handler: always captures everything (DEBUG and up), truncates on open
    file_handler = logging.FileHandler(log_filepath, mode='w')
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(logging.Formatter(
        '%(levelname)s:%(asctime)s:%(name)s:%(filename)s:%(lineno)s:%(funcName)s():' + tag + '%(message)s'))
    logger.addHandler(file_handler)

    # console handler: shorter format, inherits the logger's level
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(logging.Formatter(
        '%(levelname)s (%(filename)s:%(lineno)s): ' + tag + '%(message)s'))
    logger.addHandler(console_handler)
    return logger
def process_adp_tx_params(tx_params):
    """Takes the parameter text from ADP TX params section and converts to dictionary

    Each non-empty line has the form ``key: value``. Values that parse as
    integers (including prefixed forms such as ``0x10``) are converted to
    int; everything else is kept as a stripped string.

    :param tx_params: The parameter text from the ADP TX parameter section
    :type tx_params: str
    :return: Result dictionary with the keys and values
    :rtype: dict
    """
    def parse_value(raw):
        # int(raw, 0) honours 0x/0o/0b prefixes. The original checked with
        # base 0 but then converted with base 10, so a hex value like "0x10"
        # passed the check and crashed the conversion with ValueError.
        try:
            return int(raw, 0)
        except ValueError:
            return raw.strip()

    result = {}
    for line in tx_params.strip().split('\n'):
        # partition (rather than split) keeps values that contain ':' intact
        key, _, raw_value = line.strip().partition(':')
        result[key.strip()] = parse_value(raw_value)
    return result
class AudioReader:
    """Class for decoding audio ADP transactions received from M0N0 (demoboard)
    """
    def __init__(self, logger, save_path='temp.wav'):
        # logger: sink for progress/info messages
        # save_path: path of the WAVE file written by demoboard_audio
        self._logger = logger
        self._save_path = save_path
    def demoboard_audio(self, tx_name, tx_params, tx_payload):
        """Decodes the audio data and parameters received via the ADP TX. Saves audio data in a WAVE file.

        :param tx_name: The name of the transaction
        :type tx_name: str
        :param tx_params: The raw text from the parameter part of the ADP TX
        :type tx_params: str
        :param tx_payload: The raw text from the payload of the ADP TX
        :type tx_payload: str
        """
        audio_frame = []
        tx_params = process_adp_tx_params(tx_params)
        # default sample rate; overridden if the TX carries one, or recomputed
        # below when a recording duration is available
        sample_freq_hz = 8000
        print(tx_params)
        if tx_params:
            if 'sample_freq_hz'in tx_params:
                sample_freq_hz = int(tx_params['sample_freq_hz'])
                self._logger.info("Sample frequency (Hz): {:d}".format(
                        tx_params['sample_freq_hz']))
            if 'period_rtc_ticks'in tx_params:
                self._logger.info("RTC tick period: {:d}".format(
                        tx_params['period_rtc_ticks']))
        record_time_s = None
        if 'recording_rtc_cycles' in tx_params:
            # RTC runs at ~33 kHz, so cycles * (1/33e3) gives seconds
            record_time_s = tx_params['recording_rtc_cycles'] * (1.0/33e3)
            self._logger.info("Record time: {:0.2f} s (RTC cycles: {:d})"\
                    .format(
                    record_time_s,
                    tx_params['recording_rtc_cycles']))
        # payload is one integer word per line (any base via int(x, 0))
        audio_lines = [x.strip() for x in tx_payload.strip().split('\n')]
        audio_words = [int(x,0) for x in audio_lines]
        def twos_comp(val, bits):
            """compute the 2's complement of int value val"""
            # NOTE(review): the early `return val` below disables the
            # two's-complement conversion (samples pass through unchanged and
            # are re-biased by +128 before writing) — confirm this is intended.
            #return val ^ (1<<7)
            return val
            if (val & (1 << (bits - 1))) != 0:
                val = val - (1 << bits)
            return val
        # each 32-bit word packs four 8-bit samples, most significant byte first
        for word in audio_words:
            audio_frame.append(twos_comp((word>>24) & 0xFF, 8))
            audio_frame.append(twos_comp((word>>16) & 0xFF, 8))
            audio_frame.append(twos_comp((word>>8) & 0xFF, 8))
            audio_frame.append(twos_comp((word) & 0xFF,8))
        self._logger.info("Number of samples: {:d}".format(len(audio_frame)))
        if record_time_s:
            # derive the real sample rate from sample count and duration
            sample_freq_hz = int(len(audio_frame) / record_time_s)
            self._logger.info("Calculated freq is: {:d} Hz".format(
                sample_freq_hz))
        import wave
        import struct
        wavefile = wave.open(self._save_path, 'w')
        # mono, 1 byte per sample, derived/declared sample rate, no compression
        wavefile.setparams((1, 1, sample_freq_hz, 0, 'NONE', 'not compressed'))
        for sample in audio_frame:
            # re-bias into the unsigned 8-bit range expected by the WAVE file
            sample = sample + 128
            data = struct.pack('<h', sample)
            wavefile.writeframesraw( data )
        wavefile.close()
"""Returns the default ADP TX callbacks
"""
def get_default_adp_tx_callbacks():
return {
'demoboard_audio' : demoboard_audio
}
| StarcoderdataPython |
6606076 | <reponame>tolsac/gae-rest-framework
"""
Custom responses
"""
import six
import collections
import webob
import json
from restae.conf import settings
class CorsResponse(webob.Response):
    """webob Response that attaches the configured CORS headers.

    Header values come from the restae settings, each a list that is joined
    with ", " into the header string.
    """
    def __init__(self, *args, **kwargs):
        super(CorsResponse, self).__init__(*args, **kwargs)
        for header, setting in (
                ('Access-Control-Allow-Origin', 'CORS_ALLOW_ORIGIN'),
                ('Access-Control-Allow-Headers', 'CORS_ALLOW_HEADERS'),
                ('Access-Control-Allow-Methods', 'CORS_ALLOW_METHODS')):
            self.headers[header] = ', '.join(settings.get(setting))
class JsonResponse(CorsResponse):
    """CORS-enabled response that serialises a ``data`` keyword to a JSON body.

    - Mappings, lists, sets and tuples are JSON-encoded as-is
      (note: sets are not JSON-serialisable and will raise in json.dumps,
      matching the original behaviour).
    - Plain strings are wrapped as {"message": <data>}.
    - Any other non-None value is assigned to the body unchanged.
    """
    def __init__(self, *args, **kwargs):
        # `data` is our own keyword; pop it before webob sees the kwargs
        data = kwargs.pop('data', None)
        super(JsonResponse, self).__init__(*args, **kwargs)
        self.headers['Content-type'] = 'application/json'
        # collections.Mapping was removed in Python 3.10; prefer the
        # collections.abc location, falling back for Python 2
        try:
            from collections.abc import Mapping
        except ImportError:
            from collections import Mapping
        if isinstance(data, (Mapping, list, set, tuple)):
            self.body = json.dumps(data)
        elif isinstance(data, six.string_types):
            self.body = json.dumps({
                'message': data
            })
        elif data is not None:
            self.body = data
5153416 | <filename>GearBot/Util/DashConfig.py
import asyncio
from pytz import UnknownTimeZoneError, timezone
from Util import GearbotLogging, Utils, Translator, Configuration, Permissioncheckers
from database.DatabaseConnector import Infraction
BOT = None
def initialize(bot_in):
    """Store the bot instance in the module-level BOT global for later use
    (e.g. channel lookups in log_validator)."""
    global BOT
    BOT = bot_in
class ValidationException(Exception):
    """Raised when one or more dashboard config values fail validation."""
    def __init__(self, errors) -> None:
        # errors: mapping of field name -> human-readable error message
        self.errors = errors
def check_type(valid_type, allow_none=False, **illegal):
    """Build a validator that ensures a value is an instance of *valid_type*.

    Returns True on success, otherwise an error-message string.

    NOTE(review): `illegal` is collected as keyword arguments, so the
    membership test below inspects the *names* of those keywords — confirm
    that is the intended semantics.
    """
    def checker(guild, value, *_):
        if value is None and not allow_none:
            return "This value can not be none"
        if isinstance(value, valid_type):
            if value in illegal:
                return "This value is not allowed"
            return True
        return f"This isn't a {valid_type}"
    return checker
def validate_list_type(valid_type, allow_none=False, **illegal):
    """Build a validator that checks every element of a list.

    Returns True when all elements are instances of *valid_type* (and not
    None unless *allow_none*), otherwise an error-message string.

    NOTE(review): `illegal` is collected as keyword arguments, so the
    membership test inspects the keyword *names* — confirm intended.
    """
    def checker(guild, bad_list, preview, user, *_):
        for value in bad_list:
            # FIX: the original tested `value is not None and not allow_none`,
            # which rejected every non-None value by default; the intent
            # (per the message) is to reject missing (None) entries.
            if value is None and not allow_none:
                return f"A value in the group, '{value}', was not defined!"
            if not isinstance(value, valid_type):
                return f"A value in the group, '{value}', is the wrong type! It should be a {valid_type}"
            if value in illegal:
                return f"A value in the group, '{value}', is not allowed!"
        return True
    return checker
def validate_timezone(guild, value, preview, user, *_):
    """Return True when *value* names a known pytz timezone, else an error string."""
    try:
        timezone(value)
    except UnknownTimeZoneError:
        return "Unknown timezone"
    return True
def validate_role(allow_everyone=False, allow_zero=False):
    """Build a validator for a single role id.

    :param allow_everyone: permit the guild's @everyone role (id == guild.id)
    :param allow_zero: permit an unresolvable/zero role id (used to disable
        a role-based feature, e.g. MUTE_ROLE = 0)
    :return: validator(guild, role_id, preview, user, new_values) -> True | str
    """
    def validator(guild, role_id, preview, user, new_values):
        # the section key being edited determines extra constraints
        role_type = list(new_values.keys())[0]
        role = guild.get_role(role_id)
        if role is None and not allow_zero:
            return "Unable to find a role with that id on the server"
        if guild.id == role_id and not allow_everyone:
            return "You can't use the '@everyone' role here!"
        if role_type == "SELF_ROLES":
            # FIX: the original re-fetched with `guild.get_role(role)`,
            # passing a Role object where an id is expected; that always
            # returned None and crashed the comparison below. Also guard
            # against role being None (possible when allow_zero is set).
            if role is None or not (guild.me.top_role > role and role.managed == False):
                return "The specified role can not be managed by Gearbot!"
        return True
    return validator
def validate_role_list(guild, role_list, preview, user, new_values):
    """Validate a list of role ids for a role-list config key.

    Checks for missing/mistyped entries, use of @everyone, duplicates, and
    (for SELF_ROLES) that GearBot can actually manage each role.
    Returns True on success, otherwise an error-message string.

    FIXES vs the original:
      - error messages referenced an unassigned `role` variable (NameError);
      - the duplicate check compared against `last_role_id`, which was never
        updated (`last_role` was assigned instead), so duplicates passed.
    """
    rolelist_type = list(new_values.keys())[0]
    seen = set()
    for role_id in role_list:
        # reject missing entries before the type check so they get a clear message
        if role_id is None:
            return f"One of the roles, '{role_id}', is not valid!"
        # make sure the roles are the right type
        if not isinstance(role_id, int):
            return f"One of the roles, '{role_id}', is not a integer!"
        if role_id == guild.id:
            return "You can't use the '@everyone' role here!"
        # validate that there are no duplicate roles in the list
        if role_id in seen:
            return f"The role '{role_id}' was specified twice!"
        seen.add(role_id)
        if rolelist_type == "SELF_ROLES":
            role = guild.get_role(role_id)
            if role is None:
                return f"One of the roles, '{role_id}', is not valid!"
            if not (guild.me.top_role > role and role.managed == False):
                return f"The specified role, {role_id}, can not be managed by Gearbot!"
    return True
def check_number_range(lower, upper):
    """Build a validator accepting numbers in the inclusive range [lower, upper]."""
    def checker(guild, value, preview, user, *_):
        if not lower <= value:
            return f"Value too low, must be at least {lower}"
        if not value <= upper:
            return f"Value too high, must be at most {upper}"
        return True
    return checker
def multicheck(*args):
    """Combine validators: return the first non-True result, or True if all pass."""
    def check(*checkargs):
        for validator in args:
            outcome = validator(*checkargs)
            if outcome is not True:
                return outcome
        return True
    return check
def perm_range_check(lower, upper, other_min=None):
    """Build a dashboard-permission validator whose bounds are clamped at
    validation time: the upper bound by the acting user's permission level,
    and optionally the lower bound by another (already previewed) setting."""
    def check(guild, value, preview, user, *_):
        user_lvl = Permissioncheckers.user_lvl(user)
        effective_upper = upper if upper < user_lvl else user_lvl
        effective_lower = lower
        if other_min is not None:
            other = preview[other_min]
            effective_lower = other if other > lower else lower
        return check_number_range(effective_lower, effective_upper)(guild, value, preview, user)
    return check
def log_validator(guild, key, value, preview, *_):
    """Validate one entry of the LOG_CHANNELS section.

    :param guild: guild being configured
    :param key: channel id (string) the log config is attached to
    :param value: dict with exactly the keys CATEGORIES (non-empty list of
        known logging categories) and DISABLED_KEYS (list of known subkeys)
    :return: True on success, otherwise an error-message string
    """
    # make sure it's a dict
    if not isinstance(value, dict):
        return "Must be a dict"
    # validate channel itself
    if not is_numeric(key):
        return "Invalid channel id"
    channel = BOT.get_channel(int(key))
    if channel is None:
        return "Unknown channel"
    # cross-guild logging is only allowed towards linked servers
    if channel.guild != guild and channel.guild.id not in Configuration.get_var(guild.id, "SERVER_LINKS"):
        return "You can not log to this guild"
    perms = channel.permissions_for(guild.me)
    required = ["send_messages", "embed_links", "attach_files"]
    missing = [r for r in required if not getattr(perms, r)]
    if len(missing) != 0:
        return "Missing the following permission(s): {}".format(", ".join(missing))
    # validate subsections
    required = ["CATEGORIES", "DISABLED_KEYS"]
    missing = [r for r in required if r not in value]
    if len(missing) != 0:
        return "Missing the required attribute(s): {}".format(", ".join(missing))
    # make sure we are not getting extra junk
    excess = [k for k in value if k not in required]
    if len(excess) != 0:
        return "Unknown attribute(s): {}".format(", ".join(excess))
    # validate categories
    cats = value["CATEGORIES"]
    if not isinstance(cats, list):
        return "CATEGORIES must be a list"
    if len(cats) == 0:
        return "CATEGORIES can not be empty"
    unknown_cats = [cat for cat in cats if cat not in GearbotLogging.LOGGING_INFO]
    if len(unknown_cats) != 0:
        return "Invalid value(s) found in CATEGORIES: {}".format(", ".join(unknown_cats))
    # find unknown disabled keys
    disabled = value["DISABLED_KEYS"]
    unknown_keys = [d for d in disabled if d not in
                    [item for sublist in [subkey for subkey in {k: list(v.keys()) for k, v in GearbotLogging.LOGGING_INFO.items()}.values()]
                     for item in sublist]
                    ]
    if len(unknown_keys) != 0:
        return "Unknown logging key(s) in DISABLED_KEYS: {}".format(", ".join(unknown_keys))
    # check if they didn't disable all subkeys
    # (loop variable renamed from `key` — the original shadowed the channel-id parameter)
    for cat, keys in GearbotLogging.LOGGING_INFO.items():
        if cat in cats and cat != "FUTURE_LOGS":
            has_logging = False
            for subkey in keys:
                if subkey not in disabled:
                    has_logging = True
                    break
            if not has_logging:
                return f"The {cat} category was enabled but all of it's subkeys where disabled, please leave at least one subkey enabled or remove the category from the CATEGORIES list"
    # check for disabled keys where the category isn't even enabled
    orphaned = [d for d in disabled if d not in [item for sublist in
                                                 [subkey for subkey in {k: list(v.keys()) for k, v in GearbotLogging.LOGGING_INFO.items()
                                                                        if k in cats}.values()]
                                                 for item in sublist]
                ]
    if len(orphaned) != 0:
        # FIX: the original format string had no {} placeholder, so the key
        # list was silently dropped from the message
        return "The following key(s) are disabled but the category they belong to isn't activated: {}".format(
            ", ".join(orphaned))
    return True
VALIDATORS = {
"GENERAL": {
"PREFIX": multicheck(
check_type(str),
lambda g, v, *_: "Prefix too long" if len(v) > 10 else "Prefix can't be blank" if len(v) == 0 else True),
"LANG": lambda g, v, *_: v in Translator.LANGS or "Unknown language",
"PERM_DENIED_MESSAGE": check_type(bool),
"TIMESTAMPS": check_type(bool),
"NEW_USER_THRESHOLD": multicheck(check_type(int), check_number_range(0, 60 * 60 * 24 * 14)),
"TIMEZONE": validate_timezone
},
"PERMISSIONS": {
"LVL4_ROLES": validate_role_list,
"ADMIN_ROLES": validate_role_list,
"MOD_ROLES": validate_role_list,
"TRUSTED_ROLES": validate_role_list,
},
"ROLES": {
"SELF_ROLES": validate_role_list,
"ROLE_LIST": validate_role_list,
"ROLE_WHITELIST": check_type(bool),
"MUTE_ROLE": multicheck(check_type(int), validate_role(allow_zero=True))
},
"DASH_SECURITY": {
"ACCESS": perm_range_check(1, 5),
"INFRACTION": perm_range_check(1, 5, other_min="ACCESS"),
"VIEW_CONFIG": perm_range_check(1, 5, other_min="ACCESS"),
"ALTER_CONFIG": perm_range_check(2, 5, other_min="VIEW_CONFIG")
},
"LOG_CHANNELS": log_validator
}
def role_list_logger(t):
    """Return a change handler that logs role removals and additions for a
    role-list config key of type *t* (e.g. "MOD", "ADMIN", "SELF")."""
    def handler(guild, old, new, user_parts):
        old_ids, new_ids = set(old), set(new)
        # log removals first, then additions, matching the original order
        for event, ids in (("removed", old_ids - new_ids), ("added", new_ids - old_ids)):
            for raw_id in ids:
                role = guild.get_role(int(raw_id))
                role_name = Utils.escape_markdown(role.name) if role is not None else raw_id
                GearbotLogging.log_key(
                    guild.id,
                    "config_change_role_{}".format(event),
                    role_name=role_name,
                    role_id=raw_id,
                    type=t,
                    **user_parts
                )
    return handler
async def role_remover(active_mutes, guild, role):
    """Strip *role* from every member of *guild* that has an active mute
    infraction and is still present in the guild."""
    for mute in active_mutes:
        target = guild.get_member(mute.user_id)
        if target is None:
            continue
        await target.remove_roles(role)
async def role_adder(active_mutes, guild, role):
    """Apply *role* to every member of *guild* that has an active mute
    infraction and is still present in the guild."""
    for mute in active_mutes:
        target = guild.get_member(mute.user_id)
        if target is None:
            continue
        await target.add_roles(role)
def swap_mute_role(guild, old, new, parts):
    """Handle a change of the configured mute role.

    Schedules removal of the old role from (and addition of the new role to)
    every member with an active mute infraction, and logs the change.
    `old`/`new` are role ids; 0 means "no mute role configured".
    """
    active_mutes = Infraction.select().where(
        (Infraction.type == "Mute") & (Infraction.guild_id == guild.id) & Infraction.active)
    # role changes run as tasks on the bot's event loop; this handler itself
    # is synchronous
    loop = asyncio.get_running_loop()
    old_role = guild.get_role(old)
    new_role = guild.get_role(new)
    # enrich the log parts with resolved names (fall back to the raw ids)
    parts.update(
        old_id=old,
        old_name=Utils.escape_markdown(old_role.name) if old_role is not None else old,
        new_id=new,
        new_name=Utils.escape_markdown(new_role.name) if new_role is not None else new,
        count=len(active_mutes)
    )
    if old != 0:
        if old_role is not None:
            loop.create_task(role_remover(active_mutes, guild, old_role))
        if new != 0:
            # role replaced by another role
            GearbotLogging.log_key(guild.id, "config_mute_role_changed", **parts)
        else:
            # role removed without replacement
            GearbotLogging.log_key(guild.id, "config_mute_role_disabled", **parts, )
    if new != 0:
        if new_role is not None:
            loop.create_task(role_adder(active_mutes, guild, new_role))
        if old == 0:
            # role configured for the first time
            GearbotLogging.log_key(guild.id, "config_mute_role_set", **parts)
def self_role_updater(guild, old, new, parts):
    """Log self-role list changes and notify listeners that the list changed."""
    role_list_logger("SELF")(guild, old, new, parts)
    BOT.dispatch("self_roles_update", guild.id)
def dash_perm_change_logger(t):
    """Return a change handler that logs a dashboard security setting change.

    :param t: the DASH_SECURITY key being changed (e.g. "ACCESS")
    """
    def handler(guild, old, new, parts):
        # old/new are numeric permission levels; both the setting name and
        # the levels are translated into the guild's language before logging
        GearbotLogging.log_key(
            guild.id,
            f"config_dash_security_change",
            type=Translator.translate(f"config_dash_security_{t.lower()}", guild.id),
            old=Translator.translate(f"perm_lvl_{old}", guild.id),
            new=Translator.translate(f"perm_lvl_{new}", guild.id), **parts
        )
    return handler
def log_channel_logger(key, guild, old, new, parts):
    """Log changes to a single logging channel's configuration.

    :param key: channel id (string) the log config belongs to
    :param guild: the guild whose config changed
    :param old: previous config dict ({"CATEGORIES": [...], "DISABLED_KEYS": [...]}),
        or None when the channel was just added
    :param new: new config dict, or None when the channel was removed
    :param parts: user info merged into every log entry
    """
    # info about the channel
    parts.update({
        "channel": f"<#{key}>",
        "channel_id": key
    })
    if new is None:
        # channel removed (the original comment said "new channel", but the
        # log keys below show this branch handles removal)
        parts.update({
            "count": len(old["CATEGORIES"]),
            "categories": ", ".join(old["CATEGORIES"]),
            "key_count": len(old["DISABLED_KEYS"]),
            "keys": ", ".join(old["DISABLED_KEYS"]),
        })
        GearbotLogging.log_key(
            guild.id,
            f'logging_channel_removed{"_with_disabled" if parts["key_count"] > 0 else ""}',
            **parts
        )
    elif old is None:
        # channel newly added
        parts.update({
            "count": len(new["CATEGORIES"]),
            "categories": ", ".join(new["CATEGORIES"]),
            "key_count": len(new["DISABLED_KEYS"]),
            "keys": ", ".join(new["DISABLED_KEYS"]),
        })
        GearbotLogging.log_key(
            guild.id,
            f'logging_channel_added{"_with_disabled" if parts["key_count"] > 0 else ""}',
            **parts
        )
    else:
        # channel config modified: log each kind of delta separately
        # added categories
        new_cats = set(new["CATEGORIES"]) - set(old["CATEGORIES"])
        if len(new_cats) > 0:
            GearbotLogging.log_key(
                guild.id, "logging_category_added",
                **parts, count=len(new_cats),
                categories=", ".join(new_cats)
            )
        # removed categories
        removed_cats = set(old["CATEGORIES"]) - set(new["CATEGORIES"])
        if len(removed_cats) > 0:
            GearbotLogging.log_key(
                guild.id,
                "logging_category_removed",
                **parts,
                count=len(removed_cats),
                categories=", ".join(removed_cats)
            )
        # added disabled keys
        disabled_keys = set(new["DISABLED_KEYS"]) - set(old["DISABLED_KEYS"])
        if len(disabled_keys) > 0:
            GearbotLogging.log_key(
                guild.id,
                "logging_key_disabled",
                **parts,
                count=len(disabled_keys),
                disabled=", ".join(disabled_keys)
            )
        # removed disabled keys, but only those who"s category is still active
        enabled_keys = set(old["DISABLED_KEYS"]) - set(new["DISABLED_KEYS"]) - set(
            [item for sublist in [subkey for subkey in {k: list(v.keys()) for k, v in GearbotLogging.LOGGING_INFO.items()
                                                        if k in removed_cats}.values()]
             for item in sublist]
        )
        if len(enabled_keys) > 0:
            GearbotLogging.log_key(
                guild.id,
                "logging_key_enabled",
                **parts, count=len(enabled_keys),
                enabled=", ".join(enabled_keys)
            )
# Per-section override handlers invoked after a config value changes.
# A section maps either key -> handler(guild, old, new, user_parts), or
# (for LOG_CHANNELS) a single handler(key, guild, old, new, user_parts)
# covering the whole section. Handlers perform the logging and any side
# effects (e.g. moving the mute role between affected members).
SPECIAL_HANDLERS = {
    "ROLES": {
        "MUTE_ROLE": swap_mute_role,
        "SELF_ROLES": self_role_updater,
    },
    "PERMISSIONS": {
        "LVL4_ROLES": role_list_logger("LVL4"),
        "ADMIN_ROLES": role_list_logger("ADMIN"),
        "MOD_ROLES": role_list_logger("MOD"),
        "TRUSTED_ROLES": role_list_logger("TRUSTED"),
    },
    "DASH_SECURITY": {
        "ACCESS": dash_perm_change_logger("ACCESS"),
        "INFRACTION": dash_perm_change_logger("INFRACTION"),
        "VIEW_CONFIG": dash_perm_change_logger("VIEW_CONFIG"),
        "ALTER_CONFIG": dash_perm_change_logger("ALTER_CONFIG")
    },
    "LOG_CHANNELS": log_channel_logger
}
def is_numeric(value):
    """Return True when *value* can be converted with int(), excluding bools.

    FIX: the original only caught ValueError, so non-convertible types such
    as None or dicts raised TypeError and crashed callers like convert_back().
    """
    if type(value) == bool:
        # bool is a subclass of int; deliberately excluded
        return False
    try:
        int(value)
        return True
    except (ValueError, TypeError):
        return False
def convert_back(target):
    """Recursively normalise a JSON-ish structure coming from the dashboard:
    strings stay strings, dicts and lists are processed element-wise, and
    anything numeric-looking is converted to int."""
    # strings are checked first since they are the most common case
    if type(target) == str:
        return target
    if isinstance(target, dict):
        return {key: convert_back(item) for key, item in target.items()}
    if isinstance(target, list):
        return [convert_back(item) for item in target]
    if is_numeric(target):
        return int(target)
    return target
def update_config_section(guild, section, new_values, user, replace=False):
    """Validate and apply dashboard changes to one config section of *guild*.

    Parameters:
        guild: the guild whose configuration is edited.
        section: config section name (key into VALIDATORS and
            Configuration.TEMPLATE).
        new_values: dict of raw values from the dashboard; numeric leaves
            are restored via convert_back().
        user: the dashboard user performing the change (for audit logging).
        replace: when True the whole section is replaced instead of merged,
            and every template key must be present.

    Raises:
        ValidationException: mapping of field -> error message when any
            value fails validation, a required field is missing on replace,
            or nothing actually changed.

    Returns:
        dict with "status" and the resulting section content with ints
        stringified (presumably so large IDs survive JSON/JS number
        precision on the dashboard side -- TODO confirm).
    """
    fields = VALIDATORS[section]
    errors = dict()
    guild_config = Configuration.get_var(guild.id, section)
    new_values = convert_back(new_values)
    modified_values = new_values.copy()
    # Preview of the would-be config, handed to validators so they can
    # check cross-field constraints.
    preview = guild_config.copy()
    preview.update(**new_values)
    for k, v in new_values.items():
        if not replace and k in guild_config and guild_config[k] == v:
            # Unchanged value: drop it so it is neither validated nor logged.
            modified_values.pop(k)
        elif isinstance(fields, dict):
            # Per-key validators for this section.
            if k not in fields:
                errors[k] = "Unknown key"
            else:
                validated = fields[k](guild, v, preview, user, new_values)
                if validated is not True:
                    errors[k] = validated
        else:
            # A single validator covers the entire section.
            validated = fields(guild, k, v, preview, user, new_values)
            if validated is not True:
                errors[k] = validated
    if replace:
        # A full replace must supply every key from the template.
        for k in Configuration.TEMPLATE[section].keys():
            if k not in new_values:
                errors[k] = "Missing field"
    if len(modified_values) == 0:
        errors[section] = "Nothing to save!"
    if len(errors) > 0:
        raise ValidationException(errors)
    user_parts = {
        "user": Utils.clean_user(user),
        "user_id": user.id
    }
    old = dict(**guild_config)
    if replace:
        Configuration.set_cat(guild.id, section, modified_values)
    else:
        guild_config.update(**modified_values)
        Configuration.save(guild.id)
    # Log every applied change, delegating to section/key specific handlers
    # where they exist.
    for k, v in modified_values.items():
        o = old[k] if k in old else None
        new = modified_values[k]
        if section in SPECIAL_HANDLERS:
            s = SPECIAL_HANDLERS[section]
            if isinstance(s, dict) and k in s:
                s[k](guild, o, new, user_parts)
            else:
                s(k, guild, o, new, user_parts)
        else:
            GearbotLogging.log_key(
                guild.id,
                "config_change",
                option_name=Translator.translate(f"config_{section}_{k}".lower(), guild),
                old=o, new=new, **user_parts
            )
    if replace:
        # Keys that existed before but were dropped by the replace.
        for k in (set(old.keys()) - set(modified_values.keys())):
            o = old[k]
            if section in SPECIAL_HANDLERS:
                s = SPECIAL_HANDLERS[section]
                if isinstance(s, dict) and k in s:
                    s[k](guild, o, None, user_parts)
                else:
                    s(k, guild, o, None, user_parts)
            else:
                GearbotLogging.log_key(
                    guild.id,
                    "config_change",
                    option_name=Translator.translate(f"config_{section}_{k}".lower(), guild),
                    # Bug fix: this previously passed old=old (the entire
                    # old section dict); o is the removed key's value and
                    # is what the parallel loop above logs.
                    old=o, new=None, **user_parts
                )
    # Stringify ints (also inside lists) in the fresh section content.
    to_return = {
        k: [str(rid) if isinstance(rid, int) else rid for rid in v] if isinstance(v, list)
        else str(v) if isinstance(v, int) else v for k, v in Configuration.get_var(guild.id, section).items()
    }
    return dict(status="Updated", modified_values=to_return)
| StarcoderdataPython |
9638964 | <reponame>IronVenom/encdecpy
# The 26 rows of a tabula recta: row i is the alphabet rotated left by i,
# stored as a list of single characters.
alphabets = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
tabula_recta = [list(alphabets[shift:] + alphabets[:shift]) for shift in range(26)]
def strip(string):
    """Normalise *string* for cipher work: uppercase it and keep only A-Z.

    Spaces, digits, punctuation and any other non-letter characters are
    discarded.
    """
    letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    return "".join(ch for ch in string.upper() if ch in letters)
class autokey:
    """Autokey cipher: the keystream is the key followed by the plaintext."""

    def encode(string, key):
        """Encipher *string* with *key*; both are normalised via strip()."""
        plaintext = strip(string)
        # key + plaintext, trimmed to plaintext length, reproduces all
        # three original cases (key shorter / longer / equal).
        keystream = (strip(key) + plaintext)[:len(plaintext)]
        cipher_chars = []
        for p, k in zip(plaintext, keystream):
            shift = (ord(p) + ord(k) - 2 * ord('A')) % 26
            cipher_chars.append(chr(ord('A') + shift))
        return "".join(cipher_chars)

    def decode(string, key):
        """Decipher *string*, growing the keystream with each recovered letter."""
        row = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        keystream = strip(key)
        plain_chars = []
        for position in range(len(string)):
            k = keystream[position]
            # Column of the tabula recta selected by the keystream letter.
            column = row[ord(k) - ord('A'):] + row[:ord(k) - ord('A')]
            letter = chr(column.index(string[position]) + ord('A'))
            plain_chars.append(letter)
            keystream += letter
        return "".join(plain_chars)
4986394 | <reponame>OSpoon/flask-server
#!/usr/bin/env python
# encoding: utf-8
'''
@author: Spoon
@contact: <EMAIL>
@file: __init__.py.py
@time: 2019/11/25 15:57
@desc:
'''
from flask import Flask
from app.resource import ShortUrl, Working
from app.security import security
from app.config import config
from app.utils import errors
from flask_restful import Api
from flask_redis import FlaskRedis
from flasgger import Swagger
from flask_sqlalchemy import SQLAlchemy
from app.utils.common import log_exception
# Module-level Flask extension singletons; each is bound to the app later
# inside create_app() via its init_* helper.
api = Api()
redis_client = FlaskRedis()
swagger = Swagger()
db = SQLAlchemy()
def init_api_plugin(app):
    """Register the REST resources and attach the Api extension to *app*."""
    api.add_resource(ShortUrl, '/surl', endpoint="surl_post")
    api.add_resource(ShortUrl, '/surl/<id>', endpoint="surl_get")
    api.add_resource(Working, '/work/<date>', endpoint="work_get")
    # Route all 404s through flask-restful's error handling.
    api.catch_all_404s = True
    api.errors = errors
    api.init_app(app)
def init_redis_plugin(app):
    """Attach the Redis client; decode_responses makes it return str, not bytes."""
    redis_client.init_app(app, decode_responses=True)
def init_swagger_plugin(app):
    """Attach the flasgger Swagger documentation extension to *app*."""
    swagger.init_app(app)
def init_db_plugin(app):
    """Attach the SQLAlchemy extension to *app*."""
    db.init_app(app)
def register_blueprint(app):
    """Register the web blueprint on *app*.

    The blueprint is imported locally, presumably to avoid a circular
    import with this package -- TODO confirm.
    """
    from app.web import web
    app.register_blueprint(web)
def create_app(config_name):
    """Application factory: build and wire up a Flask app.

    *config_name* selects entries from both the ``security`` and ``config``
    mappings; both objects are loaded into app.config before extensions and
    blueprints are attached.
    """
    app = Flask(__name__)
    app.config.from_object(security[config_name])
    app.config.from_object(config[config_name])
    init_api_plugin(app)
    init_redis_plugin(app)
    init_swagger_plugin(app)
    init_db_plugin(app)
    register_blueprint(app)
    # Hook unhandled request exceptions into our logger.
    from flask import got_request_exception
    got_request_exception.connect(log_exception, app)
    return app
188614 | """
Cyborg SPDK driver modules implementation.
"""
import socket
from cyborg.accelerator.common import exception
from cyborg.accelerator.drivers.modules import generic
from oslo_log import log as logging
from oslo_config import cfg
from oslo_concurrency import processutils as putils
from cyborg.common.i18n import _
from cyborg.accelerator import configuration
from cyborg.db.sqlalchemy import api
LOG = logging.getLogger(__name__)
# oslo.config option definitions for the SPDK accelerator backend.
accelerator_opts = [
    cfg.StrOpt('spdk_conf_file',
               default='/etc/cyborg/spdk.conf',
               help=_('SPDK conf file to use for the SPDK driver in Cyborg;')),
    cfg.StrOpt('device_type',
               default='NVMe',
               help=_('Default backend device type: NVMe')),
    cfg.IntOpt('queue',
               default=8,
               help=_('Default number of queues')),
    cfg.IntOpt('iops',
               default=1000,
               help=_('Default number of iops')),
    # Bug fix: these two option names previously carried a stray trailing
    # colon ('bandwidth:', 'remoteable:'), which made them unaddressable
    # as normal config keys; the colon was clearly a typo.
    cfg.IntOpt('bandwidth',
               default=800,
               help=_('Default bandwidth')),
    cfg.BoolOpt('remoteable',
                default=False,
                help=_('remoteable is false by default'))
]

CONF = cfg.CONF
CONF.register_opts(accelerator_opts, group=configuration.SHARED_CONF_GROUP)
try:
import py_spdk
except ImportError:
py_spdk = None
class SPDKDRIVER(generic.GENERICDRIVER):
    """Cyborg accelerator driver backed by SPDK via the py_spdk bindings.

    NOTE(review): py_spdk is imported above inside a try/except and may be
    None; every method touching it assumes the import succeeded.  Many of
    the namespace-management methods below are still stubs.
    """

    def __init__(self, execute=putils.execute, *args, **kwargs):
        # self.configuration is presumably initialised by GENERICDRIVER's
        # __init__ -- TODO confirm.
        super(SPDKDRIVER, self).__init__(execute, *args, **kwargs)
        self.configuration.append_config_values(accelerator_opts)
        self.hostname = socket.gethostname()
        # Backend name from config, falling back to 'SPDK'.
        self.driver_type = self.configuration\
            .safe_get('accelerator_backend_name') or 'SPDK'
        self.device_type = self.configuration.safe_get('device_type')
        self.dbconn = api.get_backend()

    def initialize_connection(self, accelerator, connector):
        """Delegate connection setup to py_spdk."""
        return py_spdk.initialize_connection(accelerator, connector)

    def validate_connection(self, connector):
        # NOTE(review): this calls py_spdk.initialize_connection rather
        # than a validation routine -- looks like a copy/paste slip;
        # confirm against the py_spdk API.
        return py_spdk.initialize_connection(connector)

    def destory_db(self):
        # NOTE(review): method name is a typo for "destroy_db"; kept
        # unchanged because external callers may rely on it.
        if self.dbconn is not None:
            self.dbconn.close()

    def discover_driver(self, driver_type):
        # NOTE(review): HAVE_SPDK is always None here, so the accelerator
        # record is never created; the detection logic appears unfinished.
        HAVE_SPDK = None
        if HAVE_SPDK:
            values = {'acc_type': self.driver_type}
            self.dbconn.accelerator_create(None, values)

    def install_driver(self, driver_id, driver_type):
        """Set up the connection and attach a namespace for *driver_id*.

        Raises InvalidAccelerator when no DB record matches.
        """
        accelerator = self.dbconn.accelerator_query(None, driver_id)
        if accelerator:
            self.initialize_connection(accelerator, None)
            self.do_setup()
            ctrlr = self.get_controller()
            nsid = self.get_allocated_nsid(ctrlr)
            self.attach_instance(nsid)
        else:
            msg = (_("Could not find %s accelerator") % driver_type)
            raise exception.InvalidAccelerator(msg)

    def uninstall_driver(self, driver_id, driver_type):
        """Detach the namespace previously attached for this driver."""
        ctrlr = self.get_controller()
        nsid = self.get_allocated_nsid(ctrlr)
        self.detach_instance(nsid)
        pass

    def driver_list(self, driver_type):
        """Return accelerator DB records matching *driver_type*."""
        return self.dbconn.accelerator_query(None, driver_type)

    def update(self, driver_type):
        # Not implemented yet.
        pass

    def attach_instance(self, instance_id):
        """Create a namespace and attach it (helpers below are stubs)."""
        self.add_ns()
        self.attach_and_detach_ns()
        pass

    def detach_instance(self, instance_id):
        """Delete the namespace and detach it (helpers below are stubs)."""
        self.delete_ns()
        self.detach_and_detach_ns()
        pass

    def get_controller(self):
        # NOTE(review): self.ctrlr is never assigned in this class;
        # presumably set during do_setup()/py_spdk -- confirm.
        return self.ctrlr

    '''list controllers'''
    def display_controller_list(self):
        pass

    '''create namespace'''
    def add_ns(self):
        pass

    '''delete namespace'''
    def delete_ns(self):
        pass

    '''attach namespace to controller'''
    def attach_and_detach_ns(self):
        pass

    '''detach namespace from controller'''
    def detach_and_detach_ns(self):
        pass

    ''' format namespace or controller'''
    def format_nvm(self):
        pass

    def get_allocated_nsid(self, ctrl):
        # NOTE(review): self.nsid is never assigned in this class; confirm
        # where it is supposed to come from.
        return self.nsid
| StarcoderdataPython |
5103955 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import socket, sys
# BlockingIOError: [Errno 11] Resource temporarily unavailable
# python3 tcpConnection_test03.py 10000000 100
# If the first write() could not send all of its data, a second write() on
# this non-blocking socket is almost guaranteed to return EAGAIN.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('172.16.31.10', 8888)) # ideally connect to another machine on the network
sock.setblocking(0)
a = 'a' * int(sys.argv[1]) # both message sizes come from the command line; a should be large enough to overflow the send buffer
b = 'b' * int(sys.argv[2])
n1 = sock.send(a.encode()) # first send
n2 = 0
try:
    n2 = sock.send(b.encode()) # second send; raises socket.error when it hits EAGAIN
except socket.error as ex:
    print(ex)
print(n1)
print(n2)
sock.close()
4800150 | <gh_stars>0
import os
import json
import numpy as np
from naming_conventions import languages, languages_readable
from uriel import Similarities
import uriel
import copy
import pickle
from collections import defaultdict
import matplotlib.pyplot as plt
from scipy import stats
from matplotlib import colors
import seaborn as sns
"""Hardcode All This"""
train_lans = [0,5,6,11,13,16,17,19]
low_lans = [1,2,4,7,15,23]
tran_expen = np.array([34.55,40.20,45.16,0,19.19,59.97,84.64,58.28,50.65, #finnish
54.12, 68.30, 32.94, 52.72, 75.45, 12.92, 33.56, 33.67, 72.09, #norwe
44.34, 59.53, 76.02, 18.09, 54.47, 36.66, 23.46, 29.72
]) / 100
tran_expmix = np.array([68.20,58.95,52.62,0,23.11,84.36,81.65,61.98,62.29,
59.54, 70.93, 85.66, 61.11, 89.44, 24.10, 44.56, 81.73, 85.11,
56.92, 81.91, 78.70, 32.78, 64.03, 49.74, 63.06, 29.71
])/ 100
class ModelAnalysis():
    """Collect LAS scores for one meta-trained model across outer seeds,
    inner seeds and languages, reading result JSONs from finalresults/.

    Fixes over the original: the seed_list default is now an immutable
    tuple (avoiding the mutable-default-argument anti-pattern; it is only
    iterated, so callers are unaffected) and files_zeroshot uses
    defaultdict(list) instead of the equivalent lambda.
    """

    def __init__(self, model_str, seed_list=(1, 2, 5, 6, 7)):
        """*model_str* encodes the experiment name and hyper-parameters
        ("name_lr" or "name_lr_innerlr"); a seed placeholder "X" in the
        derived directory names is substituted per outer seed."""
        spl = model_str.split('_')
        name = spl[0]
        params = spl[1]
        z = ""
        if 'finetune' not in name:
            # Non-finetune (meta-learned) runs carry an extra learning-rate
            # component and a "_3" marker in their directory names.
            params += '_' + spl[2]
            z = "_3"
        self.model_str = name + "24_seedX_" + params + "_True" + z + "VAL_averaging"
        self.model_str2 = name + "24_seedX_" + params + "_True" + z + "VAL"
        self.seed_list = seed_list
        self.files_highlr = {}
        self.files_lowlr = {}
        # defaultdict(list) is the idiomatic spelling of defaultdict(lambda: []).
        self.files_zeroshot = defaultdict(list)
        self.las_highlr = {}
        self.las_lowlr = {}
        self.las_zeroshot = {}
        self.whole_dict = {}
        for l in languages:
            self._set_files_for_language(l)
        for l in languages:
            self._set_las_scores(l)
        print(name, [len(self.las_highlr[z]) for z in self.las_highlr], [len(self.las_lowlr[z]) for z in self.las_lowlr] )

    def _set_files_for_language(self, lan):
        """Locate existing result files (high/low inner LR and zero-shot)
        for *lan* across all outer and inner seeds."""
        files_highlr = []
        files_lowlr = []
        for outerseed in self.seed_list:
            testingstr = "metatesting_0.001_True3_" + self.model_str.replace('X', str(outerseed))
            testingstrlow = "metatesting_0.0001_True3_" + self.model_str.replace('X', str(outerseed))
            zeroshotstr = "finalresults/zeroshot_" + self.model_str2.replace('X', str(outerseed)) + "/" + lan + "_performance.json"
            if os.path.exists(zeroshotstr):
                self.files_zeroshot[lan].append(zeroshotstr)
            #else:
                #print("File not found", zeroshotstr)
                #raise ValueError()
            for innerseed in range(0,5):
                f = "finalresults/" + testingstr + "/" + lan + "_performance" + str(innerseed) +".json"
                f2 = "finalresults/" + testingstrlow + "/" + lan + "_performance" + str(innerseed) +".json"
                if os.path.exists(f):
                    files_highlr.append(f)
                if os.path.exists(f2):
                    files_lowlr.append(f2)
                #else:
                #    print("File not found", f2)
        self.files_highlr[lan] = files_highlr
        self.files_lowlr[lan] = files_lowlr

    def _set_las_scores(self, lan):
        """Read aligned LAS accuracies out of the located JSON files."""
        scores = []
        for f in self.files_highlr[lan]:
            with open(f,'r') as results:
                result = json.load(results)
                scores.append(result['LAS']['aligned_accuracy'])
        #print(scores)
        self.las_highlr[lan] = np.array(scores)
        scores = []
        for f in self.files_lowlr[lan]:
            with open(f,'r') as results:
                result = json.load(results)
                scores.append(result['LAS']['aligned_accuracy'])
        self.las_lowlr[lan] = np.array(scores)
        scores = []
        for f in self.files_zeroshot[lan]:
            with open(f,'r') as results:
                result = json.load(results)
                # Each zero-shot score is repeated 5x, presumably to match
                # the five inner seeds of few-shot runs -- TODO confirm.
                scores.append([result['LAS']['aligned_accuracy']]*5)
        self.las_zeroshot[lan] = np.array(scores)

    def get_mean_sd_high(self, lan, r=99):
        """Mean and std of the high-LR scores for *lan*, rounded to *r* digits."""
        b = self.las_highlr[lan]
        return round(np.mean(b), r), round(np.std(b),r)

    def get_mean_sd_low(self, lan, r=99):
        """Mean and std of the low-LR scores for *lan*, rounded to *r* digits."""
        b = self.las_lowlr[lan]
        return round(np.mean(b), r), round(np.std(b), r)

    def get_mean_sd_zero(self, lan, r=99):
        """Mean and std of the zero-shot scores for *lan*, rounded to *r* digits."""
        b = self.las_zeroshot[lan]
        return round(np.mean(b), r), round(np.std(b), r)
class FileAnalysis(ModelAnalysis):
    """Like ModelAnalysis, but built from explicit result-directory names.

    When *zero* is True only zero-shot result files are loaded (each score
    appears three times so group sizes match few-shot runs); otherwise both
    learning-rate variants of the meta-testing results are collected.
    """

    def __init__(self, filenames, name, zero=False):
        self.name = name
        self.zero = zero
        self.las_lowlr = {}
        if zero:
            self.zero_init(filenames)
        else:
            self.files_highlr = defaultdict(lambda:[])
            self.files_lowlr = defaultdict(lambda:[])
            self.las_highlr = {}
            for filename in filenames:
                for lan in languages:
                    for innerseed in range(0,5):
                        f = "finalresults/metatesting_0.001_True3_" + filename + "/" + lan + "_performance" + str(innerseed) +".json"
                        f2 = "finalresults/metatesting_0.0001_True3_" + filename + "/" + lan + "_performance" + str(innerseed) +".json"
                        if os.path.exists(f):
                            self.files_highlr[lan].append(f)
                        if os.path.exists(f2):
                            self.files_lowlr[lan].append(f2)
                        #if innerseed == 0:
                        #    print("Using file", f2)
        for lan in languages:
            self._set_las_scores(lan)

    def zero_init(self, filenames):
        """Collect zero-shot performance files for every language."""
        self.files_lowlr = defaultdict(lambda:[])
        for filename in filenames:
            for lan in languages:
                f2 = "finalresults/zeroshot_" + filename + "/" + lan + "_performance.json"
                if os.path.exists(f2):
                    # NOTE(review): r is computed but unused -- the loop
                    # below always repeats the file 3 times; it looks like
                    # range(r) was intended.  Confirm before changing.
                    r = 1
                    if len(filenames)==1:
                        r = 3
                    for i in range(3):
                        self.files_lowlr[lan].append(f2)

    def _set_las_scores(self, lan):
        """Read aligned LAS accuracies; overrides the parent to skip the
        high-LR/zero-shot bookkeeping it does not track."""
        if self.zero:
            scores = []
            for f in self.files_lowlr[lan]:
                with open(f,'r') as results:
                    result = json.load(results)
                    scores.append(result['LAS']['aligned_accuracy'])
            self.las_lowlr[lan] = np.array(scores)
        else:
            scores = []
            for f in self.files_highlr[lan]:
                with open(f,'r') as results:
                    result = json.load(results)
                    scores.append(result['LAS']['aligned_accuracy'])
            #print(scores)
            #self.las_highlr[lan] = np.array(scores)
            scores = []
            for f in self.files_lowlr[lan]:
                with open(f,'r') as results:
                    result = json.load(results)
                    scores.append(result['LAS']['aligned_accuracy'])
            self.las_lowlr[lan] = np.array(scores)

    def print_all(self):
        """Dump per-language mean/std pairs (high LR, low LR) to stdout."""
        for lan in languages:
            print('---')
            print(lan,'\t',self.get_mean_sd_high(lan, 3), self.get_mean_sd_low(lan, 3))
class MetaListAnalysis():
    """Cross-model analysis: pairwise significance tests, LaTeX tables,
    heatmap plots and URIEL correlation studies over a list of loaded
    models (plus the two hard-coded transfer baselines "tran-en" and
    "tran-mix", which appear as strings at the end of *filelist*).
    """

    def __init__(self, filelist, nameslist):
        self.filelist = filelist
        self.names = nameslist
        self.accuracy_significance = {}
        self.correlation_significance = {}
        self.correlations = {}
        self.lookup = {name:i for i,name in enumerate(nameslist)}
        # Welch t-test p-value for every (language, model1, model2) pair.
        # NOTE(review): the outer `for f in filelist` loop just recomputes
        # the same tables len(filelist) times -- f is never used.
        for f in filelist:
            for i,lan in enumerate(languages):
                self.accuracy_significance[lan] = {}
                for name1,model1 in zip(nameslist,filelist+[0,0]):
                    self.accuracy_significance[lan][name1]= {}
                    for name2,model2 in zip(nameslist,filelist+[0,0]):
                        if name2 != name1:
                            # The transfer baselines have a single score,
                            # repeated 5x to give the t-test a sample.
                            if 'tran-en' in name1:
                                array1= [tran_expen[i]]*5
                            elif 'tran-mix' in name1:
                                array1 = [tran_expmix[i]]*5
                            else:
                                array1 = model1.las_lowlr[lan]
                            if 'tran-en' in name2:
                                array2= [tran_expen[i]]*5
                            elif 'tran-mix' in name2:
                                array2 = [tran_expmix[i]]*5
                            else:
                                array2 = model2.las_lowlr[lan]
                            p_value = stats.ttest_ind(array1, array2 , equal_var=False).pvalue
                            #print("setting", name1,name2,lan)
                            self.accuracy_significance[lan][name1][name2] = p_value

    def print_latex(self, filename, train=False, print_sd =False):
        """Write a LaTeX results table to *filename*.

        The best score per language is bolded; a leading '*' marks it as
        significantly (p < 0.01) better than the runner-up.  With
        train=False only non-training languages are emitted, with
        train=True only training languages.
        """
        with open(filename,'w') as f:
            f.write(' &' + ' & '.join(self.names) + '\\\\\\hline\n')
        for i, lan in enumerate(languages):
            readable_lan = languages_readable[i]
            # Per-model means (models first, then the two baselines).
            lijstje = np.array([m.get_mean_sd_low(lan,7)[0] for m in self.filelist[:-2]] + [tran_expen[i], tran_expmix[i]])
            sds = np.array([m.get_mean_sd_low(lan,7)[1] for m in self.filelist[:-2]] + [0,0])
            #print([m.name for m in self.filelist])
            #print(lijstje)
            max_index = np.nanargmax(lijstje)
            notmax_lijstje = np.delete(lijstje, max_index)
            max_index2 = np.nanargmax(notmax_lijstje)
            names2 = np.delete(np.array(self.names), max_index)
            color = "\\rowcolor{LightRed}" if i in low_lans else ''
            #print(max_index2, max_index, readable_lan)
            significance = self.accuracy_significance[lan][self.names[max_index]][names2[max_index2]]
            #print("Is it significant?", readable_lan, self.names[max_index], names2[max_index2], significance)
            #print( '\t', significance )
            # NOTE(review): the comprehension variable i shadows the outer
            # language index i (safe in Py3 but confusing).
            lijstje = ['*{\\bf ' + str(round(l,3)) + '}'
                       if (i == max_index and significance < 0.01 and max_index < (len(self.names)-2))
                       else ('{\\bf ' + str(round(l,3)) + '}' if (i==max_index)
                             else str(round(l,3)) )
                       for i,l in enumerate(lijstje)]
            lijstje = [ l + ('\\tiny{$\\pm$ '+str(round(sd,3))+'}' if z< (len(self.names)-2) and print_sd else '') for z, (l, sd) in enumerate(zip(lijstje, sds))]
            if i not in train_lans and not train:
                # Write normal resource
                with open(filename,'a') as f:
                    # NOTE(review): trailing comma makes this line a tuple
                    # expression; harmless but likely unintended.
                    f.write(color),
                    f.write(readable_lan + ' & ')
                    f.write(' & '.join(lijstje))
                    f.write('\\\\\n')
            # Write low resources
            elif i in train_lans and train:
                with open(filename,'a') as f:
                    f.write(readable_lan + ' & ')
                    f.write(' & '.join(lijstje))
                    f.write('\\\\\n')

    def compare_two_columns(self, name1, name2):
        """Count non-training languages where *name1* vs *name2* differs
        significantly (p < 0.01); prints per-language p-values."""
        count = 0
        for i, lan in enumerate(languages):
            if i not in train_lans and 'ulg' not in lan:
                significance = self.accuracy_significance[lan][name1][name2]
                print(lan, significance)
                if significance < 0.01:
                    count += 1
        print(count)
        return count

    def plot_diffs(self, experiment_list=["english","maml","x-ne"], comparator = "x-maml"):
        """Heatmap of *comparator*'s LAS improvement over each baseline in
        *experiment_list*, per non-training language; '*' marks p < 0.01.

        NOTE(review): experiment_list uses a mutable default argument; it
        is only read here, but a tuple default would be safer.
        """
        plt.rcParams["axes.grid"] = False
        diffs = np.zeros((17,len(experiment_list)))
        pvalues = np.zeros((17,len(experiment_list)))
        labels = np.empty((17,len(experiment_list)),dtype=object)
        enum = 0
        real_lans = []
        for i, lan in enumerate(languages):
            if i not in train_lans and 'ulg' not in lan:
                for j,setup in enumerate(experiment_list):
                    lookup = self.filelist[self.lookup[setup]]
                    mean_comp = self.filelist[self.lookup[comparator]].get_mean_sd_low(lan,7)[0]*100
                    if type(lookup) is str:
                        # NOTE(review): this branch assigns mean_comp (the
                        # comparator mean) instead of mean_setup -- looks
                        # like a bug: mean_setup may be stale/undefined
                        # when setup is 'tran-en'.  Confirm before fixing.
                        if 'en' in lookup:
                            mean_comp = tran_expen[i]*100
                        else:
                            #print(lan, i, tran_expmix[i]*100,mean_comp)
                            mean_setup = tran_expmix[i]*100
                    else:
                        mean_setup = lookup.get_mean_sd_low(lan,7)[0]*100
                    diffs[enum,j] = mean_comp - mean_setup
                    pvalue = self.accuracy_significance[lan][comparator][setup]
                    pvalues[enum,j] = pvalue
                    labels[enum, j] = str(round(diffs[enum,j],2)) + ('*' if pvalues[enum,j] < 0.01 else '')
                enum += 1
                real_lans.append(languages_readable[i])
        fig, ax = plt.subplots()
        print(labels)
        rdgn = sns.diverging_palette(h_neg=10, h_pos=250, s=99, l=55, sep=3, as_cmap=True)
        #labels = np.array([['A','B'],['C','D'],['E','F']])
        g = sns.heatmap(diffs, annot=labels, ax=ax, fmt = '',
                        cmap=rdgn, vmin=-3, center=0, vmax=30)
        # We want to show all ticks...
        ax.set_yticklabels(real_lans, rotation=1)
        ax.set_xticklabels(experiment_list, horizontalalignment='center')
        # Highlight the low-resource rows in red.
        for low in [0,1,2,3,9,14]:
            ax.get_yticklabels()[low].set_color("red")
        ax.set_xlabel("Baseline")
        #g.set_axis_labels("Baseline", "Language")
        #ax.set_xticks(np.arange(5))
        #ax.set_yticks(np.arange(len(real_lans)))
        # ... and label them with the respective list entries.
        #ax.set_xticklabels(setups)
        #ax.set_yticklabels(real_lans)
        #im, cbar = tools.heatmap(diffs, real_lans, setups, ax=ax,
        #                   cmap="RdYlGn", vmin=28, center=0, vmax=-5)
        # texts = tools.annotate_heatmap(im, pvalues, valfmt="{x:.1f}", fontsize=8)
        ax.set_title("X-MAML Improvement" + (" (Zero-Shot)" if "zero" in experiment_list[0] else " (Few-Shot) "))
        fig.tight_layout()
        plt.show()

    def plot_diffs_pairwise(self):
        """Heatmap of few-shot improvement over the matching zero-shot
        setup, for training languages (and Bulgarian)."""
        plt.rcParams["axes.grid"] = False
        diffs = np.zeros((9,4))
        pvalues = np.zeros((9,4))
        labels = np.empty((9,4),dtype=object)
        enum = 0
        real_lans = []
        zeros = ["zero-eng","zero-maml","zero-x-ne","zero-x-maml"]
        fews = ["english","maml","x-ne","x-maml"]
        for i, lan in enumerate(languages):
            if i in train_lans or 'ulg' in lan:
                print(lan)
                for j,setup in enumerate(zeros):
                    #print(zeros[])
                    lookup = self.filelist[self.lookup[setup]]
                    mean_comp = self.filelist[self.lookup[fews[j]]].get_mean_sd_low(lan,7)[0]*100
                    if type(lookup) is str:
                        # NOTE(review): same mean_comp/mean_setup mix-up as
                        # in plot_diffs; unreachable here in practice since
                        # the zero-* entries are FileAnalysis objects.
                        if 'en' in lookup:
                            mean_comp = tran_expen[i]*100
                        else:
                            #print(lan, i, tran_expmix[i]*100,mean_comp)
                            mean_setup = tran_expmix[i]*100
                    else:
                        mean_setup = lookup.get_mean_sd_low(lan,7)[0]*100
                    diffs[enum,j] = mean_comp - mean_setup
                    pvalue = self.accuracy_significance[lan][fews[j]][setup]
                    pvalues[enum,j] = pvalue
                    labels[enum, j] = str(round(diffs[enum,j],2)) + ('*' if pvalues[enum,j] < 0.01 else '')
                enum += 1
                real_lans.append(languages_readable[i])
        fig, ax = plt.subplots()
        print(labels)
        rdgn = sns.diverging_palette(h_neg=10, h_pos=250, s=99, l=55, sep=3, as_cmap=True)
        #labels = np.array([['A','B'],['C','D'],['E','F']])
        g = sns.heatmap(diffs, annot=labels, ax=ax, fmt = '',
                        cmap=rdgn, vmin=-3, center=0, vmax=6)
        # We want to show all ticks...
        ax.set_yticklabels(real_lans, rotation=1)
        ax.set_xticklabels(fews, horizontalalignment='center')
        #for low in [0,1,2,3,9,14]:
        #    ax.get_yticklabels()[low].set_color("red")
        ax.set_xlabel("Model")
        #g.set_axis_labels("Baseline", "Language")
        #ax.set_xticks(np.arange(5))
        #ax.set_yticks(np.arange(len(real_lans)))
        # ... and label them with the respective list entries.
        #ax.set_xticklabels(setups)
        #ax.set_yticklabels(real_lans)
        #im, cbar = tools.heatmap(diffs, real_lans, setups, ax=ax,
        #                   cmap="RdYlGn", vmin=28, center=0, vmax=-5)
        # texts = tools.annotate_heatmap(im, pvalues, valfmt="{x:.1f}", fontsize=8)
        #ax.set_title("X-MAML Improvement" + (" (Zero-Shot)" if "zero" in experiment_list[0] else " (Few-Shot) "))
        ax.set_title("Improvement of Few-Shot over Zero-Shot", fontsize=11)
        fig.tight_layout()
        plt.show()

    def get_results(self, which):
        """Write per-seed CSVs of model *which*'s scores and compute URIEL
        similarity tables for each; results are cached in
        self.correlations[model.name]."""
        model = self.filelist[which]
        print("Doing", model.name)
        l = len(model.las_lowlr['UD_Arabic-PADT'])
        self.correlations[model.name] = []
        for index in range(l):
            filename = str(which) + str(index) + '.csv'
            with open(filename,'w') as f:
                f.write('language,' + model.name + '\n')
            for lan in languages:
                if 'Bulg' not in lan:
                    with open(filename, 'a') as f:
                        f.write(lan + ',')
                        f.write(str(model.las_lowlr[lan][index]))
                        f.write('\n')
            sim = Similarities(uriel.lang2iso, uriel.feature_names, uriel.expMix, filename)
            table = sim.create_table()
            self.correlations[model.name].append(table)
        return self.correlations[model.name]

    def compare_correlations(self):
        """Compare URIEL syntax-feature correlations between models.

        Builds (or loads from correlations.pickle) the per-seed correlation
        tables, t-tests every model pair per language, appends the raw
        numbers to hi222.txt and draws a heatmap of the means.
        """
        if not os.path.exists('correlations.pickle'):
            for i in range(len(self.filelist[:-2])):
                self.get_results(i)
            with open("correlations.pickle", "wb") as f:
                pickle.dump(self.correlations, f)
        else:
            with open('correlations.pickle', 'rb') as f:
                self.correlations = pickle.load(f)
        bigtable = np.zeros((8*5,8))
        enum = -1
        yticklabels = []
        for lan in uriel.COMPARE_LANS:
            self.correlation_significance[lan] = {}
            for feature in ["syntax_knn"]:
                enum += 1
                yticklabels.append(lan) #+"_"+feature)
                for j, name1 in enumerate(self.names[:-2]):
                    self.correlation_significance[lan][name1] = {}
                    bigtable[enum,j] = np.mean(np.array([d[name1][lan][feature] for d in self.correlations[name1]]))
                    for name2 in self.names[:-2]:
                        if name1 != name2:
                            lang = uriel.iso2lang[lan]
                            #print(type(name1))
                            #if name1 == 'eng': name1 = "english"
                            #if name2 == 'eng': name2 = "english"
                            #print(self.correlations[name1])
                            array1 = [d[name1][lan][feature] for d in self.correlations[name1]]
                            array2 = [d[name2][lan][feature] for d in self.correlations[name2]]
                            p_value = stats.ttest_ind(array1,array2 ,equal_var=False).pvalue
                            self.correlation_significance[lan][name1][name2] = p_value
                            #if p_value < 0.1:
                            with open("hi222.txt", "a") as f:
                                f.write(lang+' '+feature+' '+name1+' '+name2 + ' ')
                                f.write(str(round(np.mean(np.array(array1)),4)) + ' ')
                                f.write(str(round(np.mean(np.array(array2)),4)) + ' ')
                                f.write(str(p_value))
                                f.write('\n')
        fig, ax = plt.subplots()
        rdgn = sns.diverging_palette(145, 280, s=85, l=25, n=7, as_cmap=True) #sns.diverging_palette(h_neg=10, h_pos=250, s=99, l=55, sep=3, as_cmap=True)
        #labels = np.array([['A','B'],['C','D'],['E','F']])
        # NOTE(review): bigtable[:8])[3,1,0,2,4,5,6,7] indexes a 2-D array
        # with 8 subscripts and raises IndexError; the intent was almost
        # certainly row reordering: bigtable[:8][[3,1,0,2,4,5,6,7]].
        g = sns.heatmap(np.array(bigtable[:8])[3,1,0,2,4,5,6,7], annot=True, ax=ax,
                        cmap=rdgn, vmin=-1, center=0, vmax=1)
        ax.set_yticks(np.arange(len(yticklabels))+0.5, )
        ax.set_xticks(np.arange(len(self.filelist[:-2]))+0.5)
        ax.set_yticklabels(yticklabels, rotation=1, verticalalignment='center')
        ax.set_xticklabels([b.name for b in self.filelist[:-2]], rotation=30, horizontalalignment='center')
        ax.set_xlabel("Model")
        ax.set_ylabel("Language for syntax features")
        plt.show()
f = FileAnalysis(["finetune24_MAML_0.0001_TrueVAL_averaging"], "bad")
english = FileAnalysis(["ONLY_averaging"], "english")
maml = FileAnalysis(["metalearn24_MAMLC_0.001_5e-05_True_3VAL_averaging",
"metalearn24_MAMLC2_0.001_5e-05_True_3VAL_averaging"
"metalearn24_MAML9C_0.001_5e-05_True_3VAL_averaging",
"metalearn24_MAML10C_0.001_5e-05_True_3VAL_averaging"], "maml")
ne = FileAnalysis([
"finetune24_seed1_0.0001_TrueVAL_averaging",
"finetune24_seed6_0.0001_TrueVAL_averaging",
"finetune24_seed7_0.0001_TrueVAL_averaging", # 7
"finetune24_seed8_0.0001_TrueVAL_averaging"
],"x-ne")
xmaml = FileAnalysis([ "metalearn24_seed1_0.001_5e-05_True_3VAL_averaging",
"metalearn24_seed2_0.001_5e-05_True_3VAL_averaging",
"metalearn24_seed5_0.001_5e-05_True_3VAL_averaging",
"metalearn24_seed6_0.001_5e-05_True_3VAL_averaging"],"x-maml")
zerone = FileAnalysis(["finetune24_seed7_0.0001_TrueVAL",
"finetune24_seed6_0.0001_TrueVAL",
"finetune24_seed8_0.0001_TrueVAL"
], "zero-x-ne", zero=True)
zeroen = FileAnalysis(["english"],"zero-eng", zero=True)
zerox = FileAnalysis(["metalearn24_seed1_0.001_5e-05_True_3VAL",
"metalearn24_seed2_0.001_5e-05_True_3VAL",
"metalearn24_seed5_0.001_5e-05_True_3VAL",
"metalearn24_seed6_0.001_5e-05_True_3VAL"], "zero-x-maml", zero=True)
zeromaml = FileAnalysis(["metalearn24_MAMLC_0.001_5e-05_True_3VAL",
"metalearn24_MAMLC2_0.001_5e-05_True_3VAL"
"metalearn24_MAML9C_0.001_5e-05_True_3VAL",
"metalearn24_MAML10C_0.001_5e-05_True_3VAL"], "zero-maml", zero=True)
# Our Meta Analysis class
meta = MetaListAnalysis(
[english,maml,ne, xmaml, zeroen, zeromaml, zerone, zerox, "tran-en", "tran-mix"],
["english","maml","x-ne","x-maml","zero-eng", "zero-maml", "zero-x-ne", "zero-x-maml", "tran-en", "tran-mix"])
"""Latex"""
#meta.print_latex("all_lans.tex", print_sd=True)
#meta.print_latex("train_lans.tex", True, print_sd=True)
#meta.print_latex("test_lans_small.tex",)
#meta.print_latex("train_lans_small.tex", True,)
"""Plotting"""
#meta.plot_diffs()
#meta.plot_diffs_pairwise()
"""Getting p-values for each two columns"""
#meta.compare_two_columns("english","x-maml")
#meta.compare_two_columns("maml","x-maml")
#meta.compare_two_columns("x-ne","x-maml")
#meta.compare_two_columns("zeroen","zerox")
#meta.compare_two_columns("zerone","zerox")
#meta.compare_two_columns("zerox","x-maml")
"""Doing correlation study"""
#meta.compare_correlations()
| StarcoderdataPython |
1617226 | <reponame>aceofwings/Cantactular<gh_stars>1-10
from lib import pelt
from changepy.costs import normal_mean, normal_var, normal_meanvar
import numpy as np
# Synthetic change-point demo: two 100-point normal segments with a mean
# jump from 5.0 to 10.0 at index 100.
size = 100
mean_a = 5.0
mean_b = 10.0
var = 0.1
data_a = np.random.normal(mean_a, var, size)
data_b = np.random.normal(mean_b, var, size)
data = np.append(data_a, data_b)
# Bug fix: changepy's pelt(cost, length) expects the total number of
# observations; the hard-coded 104 silently truncated the search to the
# first 104 of the 200 points (assuming lib.pelt mirrors changepy's
# signature -- see changepy docs).
cp = pelt(normal_var(data, var), len(data))
# since data is random, sometimes it might be different, but most of the time there will be at most a couple more values around 100
print(cp)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.