# Copyright (c) 2019 leosocy. All rights reserved.
# Use of this source code is governed by an MIT-style license
# that can be found in the LICENSE file.
import io
import os
from setuptools import setup
import edcc
# Package meta-data.
NAME = "edcc"
DESCRIPTION = "EDCC: An efficient and accurate algorithm for palmprint recognition."
URL = "https://github.com/Leosocy/EDCC-Palmprint-Recognition"
EMAIL = "leosocy@gmail.com"
AUTHOR = "Leosocy"
VERSION = edcc.__version__
root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
edcc_classifiers = [
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Libraries",
]
try:
with io.open(os.path.join(root, "README.md"), encoding="utf-8") as f:
long_description = "\n" + f.read()
except FileNotFoundError:
long_description = DESCRIPTION
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=long_description,
long_description_content_type="text/markdown",
author=AUTHOR,
author_email=EMAIL,
python_requires=">=3",
url=URL,
packages=["edcc"],
package_dir={"edcc": "edcc"},
include_package_data=True,
license="MIT",
classifiers=edcc_classifiers,
)
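# Build/install sketch (standard setuptools workflow, not part of this file):
#   pip install .                        # install from the package root
#   python setup.py sdist bdist_wheel    # or build distribution artifacts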
from toolkit.modules.make_follow_sets import follow_sets
from toolkit.modules.make_first_sets import first_sets
from toolkit.modules.grammar import is_terminal
from tabulate import tabulate
def parsing_table(pgrammar, fs, fls, error_recovery=True):
"""
Input:
pgrammar: parsed grammar
fs: first sets
fls: follow sets
error_recovery: fill parsing table with pop/scan values for error cells
"""
# nonterminals with eps in their first sets
nullables = [k for k in pgrammar.keys() if "eps" in fs[k]]
# TODO: rewrite this loop better
terminals = set()
for prod in pgrammar.values():
for rule in prod:
for sym in rule.split():
if is_terminal(sym, pgrammar) and sym != "eps":
terminals.add(sym)
if not terminals:
return
terminals = list(terminals)
terminals.append("$")
table = []
for nt, prod in pgrammar.items():
row = [None] * len(terminals)
for rule in prod:
for sym in rule.split():
eps = False
if sym == "eps":
eps = True
else:
if is_terminal(sym, pgrammar):
row[terminals.index(sym)] = "{} -> {}".format(nt, rule)
else:
for fse in fs[sym]:
if fse == "eps":
eps = True
else:
row[terminals.index(fse)] = "{} -> {}".format(nt, rule)
if eps:
for flse in fls[nt]:
row[terminals.index(flse)] = "{} -> {}".format(nt, rule)
if not eps and sym not in nullables:
break
table.append([nt] + row)
if error_recovery:
for row in table:
# row[0] is the non-terminal
for flse in fls[row[0]]:
# + 1 because we also added a non-terminal
ix = terminals.index(flse) + 1
if row[ix] is None:
row[ix] = "Pop({})".format(row[0])
# fill remaining values with 'scan'
for i in range(1, len(row)):
if row[i] is None:
row[i] = "scan"
return tabulate(table, headers=["input"] + terminals)
# if __name__ == "__main__":
# import grammar as gm
# # grammar = """
# # X -> a X | g | Y Z | eps
# # Y -> d | u Y | eps
# # Z -> i | eps
# # """
# grammar = """
# E -> T E'
# E' -> + T E' | eps
# T -> F T'
# T' -> * F T' | eps
# F -> id | ( E )
# """
# pgrammar = gm.parse(grammar)
# fs = first_sets(pgrammar)
# fls = follow_sets("E", pgrammar, fs)
# # print("first sets:")
# # gm.set_print(fs)
# # print("follow sets:")
# # gm.set_print(fls)
# print(parsing_table(pgrammar, fs, fls))
class MemcacheError(Exception):
pass
class MemcacheServerError(Exception):
def __init__(self, server: str, message: str) -> None:
self.server = server
super().__init__(message)
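# Minimal usage sketch (server address and message are hypothetical): callers
# can surface which backend failed by catching MemcacheServerError and
# reading its .server attribute.
try:
    raise MemcacheServerError("10.0.0.5:11211", "SERVER_ERROR out of memory")
except MemcacheServerError as exc:
    print(exc.server, exc)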
watchdog_config = """
# SDSLabs Watchdog configuration START
UsePAM yes
PasswordAuthentication no
AuthorizedKeysCommand /opt/watchdog/bin/watchdog auth -u %u -t %t -p %k
AuthorizedKeysCommandUser root
# SDSLabs Watchdog configuration END
"""
modified_options = [
'AuthorizedKeysCommand',
'AuthorizedKeysCommandUser',
'PasswordAuthentication',
'UsePAM'
]
inside_watchdog_config = False
def process_line(line):
global inside_watchdog_config
if inside_watchdog_config and line == "# SDSLabs Watchdog configuration END\n":
inside_watchdog_config = False
return ''
if inside_watchdog_config:
return ''
if line == "# SDSLabs Watchdog configuration START\n":
inside_watchdog_config = True
return ''
l = line.strip()
i = l.find('#')
if i != -1:
l = l[:i]
if len(l) == 0:
return line
i = l.find(' ')
j = l.find('\t')
if i == -1 and j != -1:
i = j
elif j == -1 and i != -1:
pass
elif j == -1 and i == -1:
return line
else:
i = min(i, j)
key = l[:i]
value = l[i+1:].strip()
if key in modified_options:
# comment this line
return '# Watchdog: Commenting the line below out\n#' + line
else:
return line
def main():
inp = open("/etc/ssh/sshd_config")
out = open("watchdog_tmp_sshd_config", "w")
lines = inp.readlines()
for l in lines:
output_line = process_line(l)
out.write(output_line)
out.write(watchdog_config)
inp.close()
out.close()
if __name__ == "__main__":
    main()
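# A minimal sanity check for process_line (hypothetical lines, not part of the
# original script): Watchdog-managed options are commented out, anything else
# passes through unchanged.
assert process_line("Port 22\n") == "Port 22\n"
assert process_line("UsePAM yes\n") == (
    "# Watchdog: Commenting the line below out\n#UsePAM yes\n")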
#!/usr/bin/env python
#author mark_purcell@ie.ibm.com
#NOTE: FOR GOFLEX OPERATIONS DONT CHANGE THE CONTENTS OF THIS FILE
#REQUEST BUG FIXES OR ENHANCEMENTS AS NECESSARY
class GoFlexMessageFormatter():
def __init__(self):
pass
def request_meter_data(self, meter, from_date, to_date):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "ts/get_timeseries_values",
"device_id": meter,
"from": from_date,
"to": to_date
}
}
}
}
def request_meter_list(self):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "ts/get_time_series"
}
}
}
}
def store_time_series(self, values):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "ts/store_timeseries_values",
"values": values
}
}
}
}
def average_time_series(self, meter, from_date, to_date):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "ts/average_timeseries_values",
"device_id": meter,
"from": from_date,
"to": to_date
}
}
}
}
def register_model(self, model_name, entity_name, signal_name):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "register_model",
"model_name": model_name,
"entity": entity_name,
"signal": signal_name
}
}
}
}
def request_model_time_series(self, model_name, entity_name, signal_name):
return {
"serviceRequest": {
"service": {
"name": "TimeseriesService",
"args": {
"cmd": "get_model_timeseries",
"model_name": model_name,
"entity": entity_name,
"signal": signal_name
}
}
}
}
def keyValueService(self, cmd, keys):
return {
"serviceRequest": {
"service": {
"name": "KeyValueService",
"args": {
"cmd": cmd,
"keys": keys
}
}
}
}
def weatherServiceTwoDayHourlyForecast(self, api_key, lat, lng):
return {
"serviceRequest": {
"service" : {
"name" : "WeatherService-TwoDayHourlyForecast-External",
"args" : {
"apiKey" : api_key,
"latitude" : lat,
"longitude" : lng
}
}
}
}
def weatherServiceSolar15DayHourlyForecast(self, api_key, lat, lng):
return {
"serviceRequest": {
"service" : {
"name" : "WeatherService-Solar15DayHourlyForecast-External",
"args" : {
"apiKey" : api_key,
"latitude" : lat,
"longitude" : lng
}
}
}
}
def weatherServiceCleanedHistorical(self, api_key, lat, lng, start, count):
return {
"serviceRequest": {
"service" : {
"name" : "WeatherService-CleanedHistorical-External",
"args" : {
"apiKey" : api_key,
"latitude" : lat,
"longitude" : lng,
"startDate" : start,
"numDays" : count
}
}
}
}
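# Minimal usage sketch (meter id and dates are hypothetical): every helper
# above returns a plain dict wrapped in the same "serviceRequest" envelope.
_formatter = GoFlexMessageFormatter()
_msg = _formatter.request_meter_data("meter-001", "2020-01-01", "2020-01-02")
assert _msg["serviceRequest"]["service"]["args"]["cmd"] == "ts/get_timeseries_values"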
from .gradient_penalty import *
from .wasserstain_div import *
# -*- coding: utf-8 -*-
import sys
from formalchemy import templates
__doc__ = """
There are a few configuration settings available in a global config object.
- encoding: the global encoding used by FormAlchemy to deal with unicode. Default: utf-8
- engine: a valid :class:`~formalchemy.templates.TemplateEngine`
- date_format: used to format date fields. Defaults to %Y-%m-%d
- date_edit_format: used to retrieve field order. Defaults to m-d-y
Here is a simple example::
>>> from formalchemy import config
>>> config.encoding = 'iso-8859-1'
>>> config.encoding
'iso-8859-1'
>>> from formalchemy import templates
>>> config.engine = templates.TempitaEngine
There is also a convenience method to set the configuration from a config file::
>>> config.from_config({'formalchemy.encoding':'utf-8',
... 'formalchemy.engine':'mako',
... 'formalchemy.engine.options.input_encoding':'utf-8',
... 'formalchemy.engine.options.output_encoding':'utf-8',
... })
>>> config.from_config({'formalchemy.encoding':'utf-8'})
>>> config.encoding
'utf-8'
>>> isinstance(config.engine, templates.MakoEngine)
True
"""
class Config(object):
__doc__ = __doc__
__name__ = 'formalchemy.config'
__file__ = __file__
__data = dict(
encoding='utf-8',
date_format='%Y-%m-%d',
date_edit_format='m-d-y',
engine = templates.default_engine,
)
def __getattr__(self, attr):
if attr in self.__data:
return self.__data[attr]
else:
raise AttributeError('Configuration has no attribute %s' % attr)
def __setattr__(self, attr, value):
meth = getattr(self, '__set_%s' % attr, None)
if callable(meth):
meth(value)
else:
self.__data[attr] = value
def __set_engine(self, value):
if isinstance(value, templates.TemplateEngine):
self.__data['engine'] = value
else:
raise ValueError('%s is not a template engine' % value)
def _get_config(self, config, prefix):
values = {}
config_keys = config.keys()
for k in config_keys:
if k.startswith(prefix):
v = config.pop(k)
k = k[len(prefix):]
values[k] = v
return values
def from_config(self, config, prefix='formalchemy.'):
from formalchemy import templates
engine_config = self._get_config(config, '%sengine.options.' % prefix)
for k, v in self._get_config(config, prefix).items():
if k == 'engine':
engine = templates.__dict__.get('%sEngine' % v.title(), None)
if engine is not None:
v = engine(**engine_config)
else:
raise ValueError('%sEngine does not exist' % v.title())
self.__setattr__(k, v)
def __repr__(self):
return "<module 'formalchemy.config' from '%s' with values %s>" % (self.__file__, self.__data)
sys.modules['formalchemy.config'] = Config()
'''
Copyright (c) 2021-2022 OVGU LIA
Author: Harish Kumar Pakala
This source code is licensed under the Apache License 2.0 (see LICENSE.txt).
This source code may use other Open Source software components (see LICENSE.txt).
'''
try:
import queue as Queue
except ImportError:
import Queue as Queue
class DataManager(object):
'''
classdocs
'''
def __init__(self, pyAAS):
'''
Constructor
'''
self.pyAAS = pyAAS
self.InBoundProcessingQueue = Queue.Queue()
self.outBoundProcessingDict = {}
def pushInboundMessage(self,msg):
self.InBoundProcessingQueue.put(msg)
def configure(self):
self.pyAAS.serviceLogger.info('The Database manager is being configured')
def start(self):
self.POLL = True
self.pyAAS.serviceLogger.info('The Database manager is being started')
while self.POLL:
if (self.InBoundProcessingQueue).qsize() != 0:
inMessage = self.InBoundProcessingQueue.get()
if inMessage["functionType"] == 1:
dba = self.pyAAS.dba
_dba_method = getattr(dba,inMessage['method'])
self.outBoundProcessingDict[inMessage["instanceid"]] = _dba_method(inMessage['data'])
elif inMessage['functionType'] == 3:
dba = self.pyAAS.dba
(dba.saveNewConversationMessage(inMessage['conversationId'],inMessage['messageType'],inMessage["messageId"],inMessage["message"]))
self.pyAAS.serviceLogger.info('The Database manager is started')
def stop(self):
self.pyAAS.serviceLogger.info('The Database manager is being stopped')
self.POLL = False
self.pyAAS.serviceLogger.info('The Database manager is stopped')
def update(self):
pass
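# Illustrative shape of a functionType-1 message as consumed by start() above.
# Only the keys come from the code; the values here are assumptions.
EXAMPLE_INBOUND_MESSAGE = {
    "functionType": 1,
    "instanceid": "instance-0001",   # used to key outBoundProcessingDict
    "method": "getAAS",              # hypothetical pyAAS.dba method name
    "data": {},
}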
# The sequence of triangle numbers is generated by adding the natural numbers. For example, the 7th triangle number
# is 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten triangle numbers are:
#
# 1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
#
# Let us list the divisors of the first seven triangle numbers:
#
# 1: 1
# 3: 1, 3
# 6: 1, 2, 3, 6
# 10: 1, 2, 5, 10
# 15: 1, 3, 5, 15
# 21: 1, 3, 7, 21
# 28: 1, 2, 4, 7, 14, 28
# As we can see, 28 is the first triangle number with more than five divisors.
#
# What is the first triangle number to have more than five hundred divisors?
import math
from itertools import count
def get_amount_of_dividers(number):
    # Count divisors in pairs (i, number // i) for i up to sqrt(number).
    amount = 0
    for i in range(1, int(math.sqrt(number)) + 1):
        if number % i == 0:
            amount += 2
    # A perfect square counted its root twice; correct for that.
    if math.sqrt(number).is_integer():
        amount -= 1
    return amount
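# Quick sanity check against the worked example in the problem statement:
# 28 should have six divisors (1, 2, 4, 7, 14, 28).
assert get_amount_of_dividers(28) == 6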
def main():
    for i in count(1):
        number = i * (i + 1) // 2  # the i-th triangle number
        amount_of_dividers = get_amount_of_dividers(number)
        if amount_of_dividers > 500:
            print(f'{number} - number of divisors: {amount_of_dividers}')
            break
if __name__ == '__main__':
main()
from django.conf import settings
if settings.WITH_WQDB:
from wq.db import rest
from wq.db.patterns import serializers as patterns
from .models import Note
rest.router.register_model(
Note,
serializer=patterns.NaturalKeyModelSerializer,
fields="__all__",
)
# Introduction to Python
# Structure of if statements
"""
if condition:
Statements
elif condition:
Statements
else:
Statements
"""
# Grade of a student
marks = 90
# No braces in Python; indentation does the job
if marks > 90:
print("Grade O")
elif marks > 80:
print("Grade E")
elif marks > 70:
print("Grade A")
elif marks > 60:
print("Grade B")
elif marks > 50:
print("Grade C")
else:
print("Better luck next time")
# Divisible or not
number1 = 45
number2 = 5
if number1%number2 == 0:
print("Divisible")
else:
print("not divisible")
class DeprecatedEnv(ImportError):
pass
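# Hypothetical usage sketch: raised when code imports an environment whose id
# has been superseded, e.g.
#   raise DeprecatedEnv("Env-v0 is deprecated; please use Env-v1 instead")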
#!/usr/bin/env python
# coding: utf-8
# ## Case Challenge Part I (Individual Assignment 1)
# After three years serving customers across the San Francisco Bay Area, the executives at
# Apprentice Chef have decided to take on an analytics project to better understand how much
# revenue to expect from each customer within their first year of using their services. Thus, they
# have hired you on a full-time contract to analyze their data, develop your top insights, and build a
# machine learning model to predict revenue over the first year of each customer’s life cycle. They
# have explained to you that for this project, they are not interested in a time series analysis and
# instead would like to “keep things simple” by providing you with a dataset of aggregated
# customer information.
# ## Part 1: Data Exploration
# <h3> Package imports, peeking into the data and checking for missing values
# In[1]:
# Importing libraries
import pandas as pd # Data science essentials
import matplotlib.pyplot as plt # Essential graphical output
import seaborn as sns # Enhanced graphical output
import numpy as np # Mathematical essentials
import statsmodels.formula.api as smf # Regression modeling
from os import listdir # Look inside file directory
from sklearn.model_selection import train_test_split # Split data into training and testing data
import gender_guesser.detector as gender # Guess gender based on (given) name
from sklearn.linear_model import LinearRegression # OLS Regression
import sklearn.linear_model # Linear models
from sklearn.neighbors import KNeighborsRegressor # KNN for Regression
from sklearn.preprocessing import StandardScaler # standard scaler
import openpyxl
# setting pandas print options
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# Filepath
file = './Apprentice_Chef_Dataset.xlsx'
# Importing the dataset
apprentice = pd.read_excel(io=file)
# formatting and printing the dimensions of the dataset
print(f"""
Size of Original Dataset
------------------------
Observations: {apprentice.shape[0]}
Features: {apprentice.shape[1]}
There are {apprentice.isnull().sum().sum()} missing values
""")
# In[2]:
# Look at the data
apprentice.head()
# In[3]:
# Checking for missing values
apprentice.isnull().any()
# The missing values are in FAMILY_NAME, which will not be used
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
# <h3>Analyzing the Distribution of Revenues</h3>
# <h4>Develop a histogram to analyze the distribution of the Y-variable.</h4>
# In[4]:
# Histogram to check distribution of the response variable
sns.displot(data=apprentice,
x='REVENUE',
height=5,
aspect=2)
# displaying the histogram
plt.show()
# <h4>Develop a histogram to analyze the distribution of the log of the Y-variable.</h4>
# In[5]:
# log transforming REVENUE and saving it to the dataset
apprentice['log_REVENUE'] = np.log10(apprentice['REVENUE'])
# developing a histogram using for log Revenue
sns.displot(data=apprentice,
x='log_REVENUE',
height=5,
aspect=2)
# displaying the histogram
plt.show()
# The log data is a bit better, although there is still that under-represented data point
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
#
# <h3>Based on the outputs above, identify the data type of each original variable in the dataset.</h3><br>
# Use the following groupings:
#
# * CONTINUOUS
# * INTERVAL/COUNT
# * CATEGORICAL
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
#
# ## Part 2: Trend Based Features
# <h3>Checking the Continuous Data</h3>
# In[6]:
########################
# Visual EDA (Scatterplots)
########################
# setting figure size
fig, ax = plt.subplots(figsize=(10, 8))
# developing a scatterplot
plt.subplot(2, 2, 1)
sns.scatterplot(x=apprentice['AVG_TIME_PER_SITE_VISIT'],
y=apprentice['REVENUE'],
color='g')
# adding labels but not adding title
plt.xlabel(xlabel='Average Visit Time')
plt.ylabel(ylabel='Revenue')
########################
# developing a scatterplot
plt.subplot(2, 2, 2)
sns.scatterplot(x=apprentice['AVG_PREP_VID_TIME'],
y=apprentice['REVENUE'],
color='g')
# adding labels but not adding title
plt.xlabel(xlabel='Average Video Time')
plt.ylabel(ylabel='Revenue')
########################
# developing a scatterplot
plt.subplot(2, 2, 3)
sns.scatterplot(x=apprentice['TOTAL_PHOTOS_VIEWED'],
y=apprentice['REVENUE'],
color='orange')
# adding labels but not adding title
plt.xlabel(xlabel='Total Photos Viewed')
plt.ylabel(ylabel='Revenue')
########################
# developing a scatterplot
plt.subplot(2, 2, 4)
sns.scatterplot(x=apprentice['TOTAL_MEALS_ORDERED'],
y=apprentice['REVENUE'],
color='r')
# adding labels but not adding title
plt.xlabel(xlabel='Total Meals')
plt.ylabel(ylabel='Revenue')
# cleaning up the layout and displaying the results
plt.tight_layout()
plt.show()
# From the data collection method it is clear that Median Meal Rating and Average Clicks per Visit should be treated as count data, since they are not continuous
# <h3>Checking the Interval and Count Data</h3>
# In[7]:
# Counting the number of zeroes in the interval data
noon_canc_zeroes = apprentice['CANCELLATIONS_BEFORE_NOON'].value_counts()[0]
after_canc_zeroes = apprentice['CANCELLATIONS_AFTER_NOON'].value_counts()[0]
weekly_log_zeroes = apprentice['WEEKLY_PLAN'].value_counts()[0]
early_meal_zeroes = apprentice['EARLY_DELIVERIES'].value_counts()[0]
late_meal_zeroes = apprentice['LATE_DELIVERIES'].value_counts()[0]
master_class_zeroes = apprentice['MASTER_CLASSES_ATTENDED'].value_counts()[0]
photo_view = apprentice['TOTAL_PHOTOS_VIEWED'].value_counts()[0]
# printing a table of the results
print(f"""
Yes\t\tNo
---------------------
Cancellations Before Noon | {noon_canc_zeroes}\t\t{len(apprentice) - noon_canc_zeroes}
Cancellations After Noon | {after_canc_zeroes}\t\t{len(apprentice) - after_canc_zeroes}
Weekly plan Subscription | {weekly_log_zeroes}\t\t{len(apprentice) - weekly_log_zeroes}
Early Meals. | {early_meal_zeroes}\t\t{len(apprentice) - early_meal_zeroes}
Late Meals. | {late_meal_zeroes}\t\t{len(apprentice) - late_meal_zeroes}
Master Class Attendance | {master_class_zeroes}\t\t{len(apprentice) - master_class_zeroes}
Photo Views. | {photo_view}\t\t{len(apprentice) - photo_view}
""")
# In[8]:
# Dummy variables for the factors we found above with at least 100 observations
apprentice['noon_canc'] = 0
apprentice['after_canc'] = 0
apprentice['weekly_plan_sub'] = 0
apprentice['early_delivery'] = 0
apprentice['late_delivery'] = 0
apprentice['masterclass_att'] = 0
apprentice['view_photo'] = 0
# Iterate over each row to create the new boolean feature columns
for index, value in apprentice.iterrows():
# For noon cancellations
if apprentice.loc[index, 'CANCELLATIONS_BEFORE_NOON'] > 0:
apprentice.loc[index, 'noon_canc'] = 1
# For afternoon cancellations
if apprentice.loc[index, 'CANCELLATIONS_AFTER_NOON'] > 0:
apprentice.loc[index, 'after_canc'] = 1
# Weekly meal plan subscription
if apprentice.loc[index, 'WEEKLY_PLAN'] > 0:
apprentice.loc[index, 'weekly_plan_sub'] = 1
# Early deliveries
if apprentice.loc[index, 'EARLY_DELIVERIES'] > 0:
apprentice.loc[index, 'early_delivery'] = 1
# Late Deliveries
if apprentice.loc[index, 'LATE_DELIVERIES'] > 0:
apprentice.loc[index, 'late_delivery'] = 1
# Masterclass attendance
if apprentice.loc[index, 'MASTER_CLASSES_ATTENDED'] > 0:
apprentice.loc[index, 'masterclass_att'] = 1
# Viewed Photos
if apprentice.loc[index, 'TOTAL_PHOTOS_VIEWED'] > 0:
apprentice.loc[index, 'view_photo'] = 1
# Another factor I want to consider: flags for whether the customer contacted customer service on more than half of their orders, and whether mobile or PC is the preferred route of ordering.
# In[9]:
# Checking distribution
contact_greater = []
mobile_greater = []
# Instantiating dummy variables
for index, value in apprentice.iterrows():
# For noon cancellations
if apprentice.loc[index, 'CONTACTS_W_CUSTOMER_SERVICE'] > (apprentice.loc[index, 'TOTAL_MEALS_ORDERED']) / 2:
contact_greater.append(1)
else:
contact_greater.append(0)
# Instantiating dummy variables
for index, value in apprentice.iterrows():
if apprentice.loc[index, 'MOBILE_LOGINS'] > apprentice.loc[index, 'PC_LOGINS']:
mobile_greater.append(1)
else:
mobile_greater.append(0)
contact_greater = pd.DataFrame(contact_greater)
mobile_greater = pd.DataFrame(mobile_greater)  # PC logins are consistently higher than mobile logins
contact_greater.value_counts() # Checking distribution of zeros
# Adding them to the data
apprentice['contact_greater'] = contact_greater
apprentice['mobile_greater'] = mobile_greater
# In[10]:
# <h4>Checking the Count and interval data after dealing with zeroes</h4>
# Some of the count data carried enough information in their zeroes to be split into boolean features. Now, I will plot the distributions of the interval data to see which variables might need transformation to give insight into our model.
# After checking the plots for all the interval data, these were the ones needing transformation.
# In[11]:
# setting figure size
fig, ax = plt.subplots(figsize=(15, 10))
## Plot 1: Original X, Original Y ##
plt.subplot(1, 2, 1)
# Plotting
sns.boxplot(x='AVG_CLICKS_PER_VISIT',
y='REVENUE',
data=apprentice
)
# titles and labels
plt.title('Average clicks per visit')
## Plot 1: Original X, Original Y ##
plt.subplot(1, 2, 2)
# Plotting
sns.boxplot(x='CONTACTS_W_CUSTOMER_SERVICE',
y='REVENUE',
data=apprentice
)
# titles and labels
plt.title('Customer Service')
# Showing the displaying
plt.show()
# In[12]:
# Converting to logs and seeing if the data improves
apprentice['log_clicks'] = np.log10(apprentice['AVG_CLICKS_PER_VISIT']) # Average clicks log
apprentice['log_customer'] = np.log10(apprentice['CONTACTS_W_CUSTOMER_SERVICE']) # Customer contact
# setting figure size
fig, ax = plt.subplots(figsize=(15, 10))
## Plot 1: Original X, Original Y ##
plt.subplot(1, 2, 1)
# Plotting
sns.boxplot(x='log_clicks',
y='log_REVENUE',
data=apprentice
)
# titles and labels
plt.title('LOG Average clicks per visit')
## Plot 1: Original X, Original Y ##
plt.subplot(1, 2, 2)
# Plotting
sns.boxplot(x='log_customer',
y='log_REVENUE',
data=apprentice
)
# titles and labels
plt.title('LOG Customer Service')
# Showing the displaying
plt.show()
# In[13]:
# Dummy variables for the factors we found above with at least 100 observations
apprentice['meals_below_fif'] = 0
apprentice['meals_above_two'] = 0
apprentice['unique_meals_above_ten'] = 0
apprentice['cust_serv_under_ten'] = 0
apprentice['click_under_eight'] = 0
# Iterate over each row to create the new boolean feature columns
for index, value in apprentice.iterrows():
    # Total meals at least 200 ("above two hundred")
    if apprentice.loc[index, 'TOTAL_MEALS_ORDERED'] >= 200:
        apprentice.loc[index, 'meals_above_two'] = 1
    # Total meals at most 15 ("below fifteen")
    if apprentice.loc[index, 'TOTAL_MEALS_ORDERED'] <= 15:
        apprentice.loc[index, 'meals_below_fif'] = 1
# Unique meals greater 10
if apprentice.loc[index, 'UNIQUE_MEALS_PURCH'] > 10:
apprentice.loc[index, 'unique_meals_above_ten'] = 1
# Customer service less than 10
if apprentice.loc[index, 'CONTACTS_W_CUSTOMER_SERVICE'] < 10:
apprentice.loc[index, 'cust_serv_under_ten'] = 1
# Clicks below 8
if apprentice.loc[index, 'AVG_CLICKS_PER_VISIT'] < 8:
apprentice.loc[index, 'click_under_eight'] = 1
# Adding the new variable
apprentice['freq_customer_service'] = 0
# Instantiating dummy variables
for index, value in apprentice.iterrows():
# For noon cancellations
if apprentice.loc[index, 'CONTACTS_W_CUSTOMER_SERVICE'] > (apprentice.loc[index, 'TOTAL_MEALS_ORDERED']) / 2:
apprentice.loc[index, 'freq_customer_service'] = 1
# In[14]:
# Log transforms
inter_list = ['LARGEST_ORDER_SIZE', 'PRODUCT_CATEGORIES_VIEWED', 'PC_LOGINS',
'TOTAL_MEALS_ORDERED', 'UNIQUE_MEALS_PURCH', 'CONTACTS_W_CUSTOMER_SERVICE']
for item in inter_list:
# Converting to logs and seeing if the data improves
apprentice['log_' + item] = np.log10(apprentice[item])
# <h3>Working with Categorical Data</h3>
# In[15]:
# STEP 1: splitting personal emails
# placeholder list
placeholder_lst = []
# looping over each email address
for index, col in apprentice.iterrows():
# splitting email domain at '@'
split_email = apprentice.loc[index, 'EMAIL'].split(sep='@')
# appending placeholder_lst with the results
placeholder_lst.append(split_email)
# converting placeholder_lst into a DataFrame
email_df = pd.DataFrame(placeholder_lst)
# STEP 2: concatenating with original DataFrame
# renaming column to concatenate
email_df.columns = ['0', 'personal_email_domain']
# concatenating personal_email_domain with the apprentice DataFrame
apprentice = pd.concat([apprentice, email_df['personal_email_domain']],
axis=1)
# In[16]:
# printing value counts of personal_email_domain
apprentice.loc[:, 'personal_email_domain'].value_counts()
# In[17]:
# email domain types
personal_email_domains = ['@gmail.com', '@microsoft.com', '@yahoo.com',
'@msn.com', '@live.com', '@protonmail.com',
'@aol.com', '@hotmail.com', '@apple.com']
# Domain list
domain_lst = []
# looping to group observations by domain type
for domain in apprentice['personal_email_domain']:
if '@' + domain in personal_email_domains:
domain_lst.append('personal')
else:
domain_lst.append('work')
# concatenating with original DataFrame
apprentice['domain_group'] = pd.Series(domain_lst)
# checking results
apprentice['domain_group'].value_counts()
# Created some extra categorical data that we can use to try to infer some more statistics
# In[18]:
# one hot encoding categorical variables
one_hot_domain = pd.get_dummies(apprentice['domain_group'])
# joining codings together
apprentice = apprentice.join([one_hot_domain])
# In[19]:
apprentice.describe()
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
#
# ## Part 3: Model Testing
# <br>
# In[20]:
# making a copy of the dataset
apprentice_explanatory = apprentice.copy()
# dropping REVENUE and identifier/text columns from the explanatory variable set
apprentice_explanatory = apprentice_explanatory.drop(['REVENUE', 'NAME', 'EMAIL', 'FIRST_NAME',
'FAMILY_NAME', 'personal_email_domain', 'domain_group',
'log_REVENUE'], axis=1)
# formatting each explanatory variable for statsmodels
for val in apprentice_explanatory:
print(val, '+')
# In[21]:
# Step 1: build a model
lm_best = smf.ols(formula="""log_REVENUE ~ CROSS_SELL_SUCCESS +
UNIQUE_MEALS_PURCH +
CONTACTS_W_CUSTOMER_SERVICE +
PRODUCT_CATEGORIES_VIEWED +
AVG_PREP_VID_TIME +
LARGEST_ORDER_SIZE +
MEDIAN_MEAL_RATING +
AVG_CLICKS_PER_VISIT +
masterclass_att +
view_photo +
contact_greater +
mobile_greater +
log_clicks +
log_customer +
meals_below_fif +
meals_above_two +
unique_meals_above_ten +
click_under_eight +
freq_customer_service +
log_LARGEST_ORDER_SIZE +
log_PRODUCT_CATEGORIES_VIEWED +
log_TOTAL_MEALS_ORDERED +
log_UNIQUE_MEALS_PURCH +
log_CONTACTS_W_CUSTOMER_SERVICE +
personal +
work """,
data=apprentice)
# Step 2: fit the model based on the data
results = lm_best.fit()
# Step 3: analyze the summary output
print(results.summary())
# In[22]:
# preparing explanatory variable data
x_variables = ['CROSS_SELL_SUCCESS', 'UNIQUE_MEALS_PURCH', 'CONTACTS_W_CUSTOMER_SERVICE',
'PRODUCT_CATEGORIES_VIEWED', 'AVG_PREP_VID_TIME', 'LARGEST_ORDER_SIZE',
'MEDIAN_MEAL_RATING', 'AVG_CLICKS_PER_VISIT', 'masterclass_att',
'view_photo', 'log_clicks', 'log_customer', 'meals_below_fif',
'meals_above_two', 'unique_meals_above_ten', 'click_under_eight',
'freq_customer_service', 'log_LARGEST_ORDER_SIZE', 'log_PRODUCT_CATEGORIES_VIEWED',
'log_TOTAL_MEALS_ORDERED', 'log_UNIQUE_MEALS_PURCH', 'log_CONTACTS_W_CUSTOMER_SERVICE',
'personal', 'work']
apprentice_data = apprentice_explanatory[x_variables]
# preparing the target variable
apprentice_target = apprentice.loc[:, 'log_REVENUE']
# Splitting data
X_train, X_test, y_train, y_test = train_test_split(
apprentice_data,
apprentice_target,
test_size=0.25,
random_state=219)
# In[23]:
# INSTANTIATING a model object
lr = LinearRegression()
# FITTING to the training data
lr_fit = lr.fit(X_train, y_train)
# PREDICTING on new data
lr_pred = lr_fit.predict(X_test)
# SCORING the results
print('OLS Training Score :', lr.score(X_train, y_train).round(4)) # using R-square
print('OLS Testing Score :', lr.score(X_test, y_test).round(4)) # using R-square
lr_train_score = lr.score(X_train, y_train).round(4)
lr_test_score = lr.score(X_test, y_test).round(4)
# displaying and saving the gap between training and testing
print('OLS Train-Test Gap :', abs(lr_train_score - lr_test_score).round(4))
lr_test_gap = abs(lr_train_score - lr_test_score).round(4)
# In[24]:
# zipping each feature name to its coefficient
lr_model_values = zip(apprentice_data.columns,
lr_fit.coef_.round(decimals=4))
# setting up a placeholder list to store model features
lr_model_lst = [('intercept', lr_fit.intercept_.round(decimals=4))]
# printing out each feature-coefficient pair one by one
for val in lr_model_values:
lr_model_lst.append(val)
# checking the results
for pair in lr_model_lst:
print(pair)
# In[25]:
# Making the list a data frame to print later
lr_model_lst = pd.DataFrame(lr_model_lst)
# Naming the Columns
lr_model_lst.columns = ['Variables', 'Coefficients']
# Removing indices for print
lr_model_lst_no_indices = lr_model_lst.to_string(index=False)
# In[26]:
# Importing another library
import sklearn.linear_model # Linear models
# In[27]:
# INSTANTIATING a model object
lasso_model = sklearn.linear_model.Lasso()  # default penalty strength (alpha=1.0)
# FITTING to the training data
lasso_fit = lasso_model.fit(X_train, y_train)
# PREDICTING on new data
lasso_pred = lasso_fit.predict(X_test)
# SCORING the results
print('Lasso Training Score :', lasso_model.score(X_train, y_train).round(4))
print('Lasso Testing Score :', lasso_model.score(X_test, y_test).round(4))
## the following code has been provided for you ##
# saving scoring data for future use
lasso_train_score = lasso_model.score(X_train, y_train).round(4) # using R-square
lasso_test_score = lasso_model.score(X_test, y_test).round(4) # using R-square
# displaying and saving the gap between training and testing
print('Lasso Train-Test Gap :', abs(lasso_train_score - lasso_test_score).round(4))
lasso_test_gap = abs(lasso_train_score - lasso_test_score).round(4)
# In[28]:
# zipping each feature name to its coefficient
lasso_model_values = zip(apprentice_data.columns, lasso_fit.coef_.round(decimals=2))
# setting up a placeholder list to store model features
lasso_model_lst = [('intercept', lasso_fit.intercept_.round(decimals=2))]
# printing out each feature-coefficient pair one by one
for val in lasso_model_values:
lasso_model_lst.append(val)
# checking the results
for pair in lasso_model_lst:
print(pair)
# In[29]:
# INSTANTIATING a model object
ard_model = sklearn.linear_model.ARDRegression()
# FITTING the training data
ard_fit = ard_model.fit(X_train, y_train)
# PREDICTING on new data
ard_pred = ard_fit.predict(X_test)
print('ARD Training Score:', ard_model.score(X_train, y_train).round(4))
print('ARD Testing Score :', ard_model.score(X_test, y_test).round(4))
# saving scoring data for future use
ard_train_score = ard_model.score(X_train, y_train).round(4)
ard_test_score = ard_model.score(X_test, y_test).round(4)
# displaying and saving the gap between training and testing
print('ARD Train-Test Gap :', abs(ard_train_score - ard_test_score).round(4))
ard_test_gap = abs(ard_train_score - ard_test_score).round(4)
# In[30]:
# zipping each feature name to its coefficient
ard_model_values = zip(apprentice_data.columns, ard_fit.coef_.round(decimals=5))
# setting up a placeholder list to store model features
ard_model_lst = [('intercept', ard_fit.intercept_.round(decimals=2))]
# printing out each feature-coefficient pair one by one
for val in ard_model_values:
ard_model_lst.append(val)
# checking the results
for pair in ard_model_lst:
print(pair)
# In[31]:
# KNN
# INSTANTIATING a StandardScaler() object
scaler = StandardScaler()
# FITTING the scaler with the data
scaler.fit(apprentice_data)
# TRANSFORMING our data after fit
X_scaled = scaler.transform(apprentice_data)
# converting scaled data into a DataFrame
X_scaled_df = pd.DataFrame(X_scaled)
# adding labels to the scaled DataFrame
X_scaled_df.columns = apprentice_data.columns
# Train-test split again, now on the scaled data
X_train_STAND, X_test_STAND, y_train_STAND, y_test_STAND = train_test_split(
X_scaled_df,
apprentice_target,
test_size=0.25,
random_state=219)
# INSTANTIATING a model with the optimal number of neighbors
knn_stand = KNeighborsRegressor(algorithm='auto',
n_neighbors=9)
# FITTING the model based on the training data
knn_stand_fit = knn_stand.fit(X_train_STAND, y_train_STAND)
# PREDICTING on new data
knn_stand_pred = knn_stand_fit.predict(X_test_STAND)
# SCORING the results
print('KNN Training Score:', knn_stand.score(X_train_STAND, y_train_STAND).round(4))
print('KNN Testing Score :', knn_stand.score(X_test_STAND, y_test_STAND).round(4))
# saving scoring data for future use
knn_stand_score_train = knn_stand.score(X_train_STAND, y_train_STAND).round(4)
knn_stand_score_test = knn_stand.score(X_test_STAND, y_test_STAND).round(4)
# displaying and saving the gap between training and testing
print('KNN Train-Test Gap:', abs(knn_stand_score_train - knn_stand_score_test).round(4))
knn_stand_test_gap = abs(knn_stand_score_train - knn_stand_score_test).round(4)
# In[32]:
# comparing results
print(f"""
Model Train Score Test Score Train-Test Gap Model Size
----- ----------- ---------- --------------- ----------
OLS {lr_train_score} {lr_test_score} {lr_test_gap} {len(lr_model_lst)}
Lasso {lasso_train_score} {lasso_test_score} {lasso_test_gap} {len(lasso_model_lst)}
ARD {ard_train_score} {ard_test_score} {ard_test_gap} {len(ard_model_lst)}
""")
# In[33]:
# creating a dictionary for model results
model_performance = {
'Model Type': ['OLS', 'Lasso', 'ARD'],
'Training': [lr_train_score, lasso_train_score,
ard_train_score],
'Testing': [lr_test_score, lasso_test_score,
ard_test_score],
'Train-Test Gap': [lr_test_gap, lasso_test_gap,
ard_test_gap],
'Model Size': [len(lr_model_lst), len(lasso_model_lst),
len(ard_model_lst)],
'Model': [lr_model_lst, lasso_model_lst, ard_model_lst]}
# converting model_performance into a DataFrame
model_performance = pd.DataFrame(model_performance)
model_performance.head()
# <hr style="height:.9px;border:none;color:#333;background-color:#333;" /><br>
#
# ## Part 4: Final Model Selected
#
# The best model from the above analysis is the OLS regression which has the following:
#
# In[34]:
# Selected Model
print(f"""
The Model selected is OLS Regression
Model Train Score Test Score Train-Test Gap Model Size
----- ----------- ---------- --------------- ----------
OLS {lr_train_score} {lr_test_score} {lr_test_gap} {len(lr_model_lst)}
Model Coefficients
----------------------
{lr_model_lst_no_indices}
""")
from flocx_ui.api import schema
from flocx_ui.api.utils import generic_provider_request as generic_request
from flocx_ui.api.utils import validate_data_with
def post(path, **kwargs):
"""An alias for generic_request with the type set to 'POST'
:param path: A url path
:param **kwargs: The keyword arguments to be passed to the request function
:return: A request for the given path
"""
return generic_request('POST', path, **kwargs)
@validate_data_with(None, schema.validate_provider_offer)
def offer_create(request, offer):
"""Create an offer
:param request: HTTP request
:param offer: The offer to be created
:return: The offer that was created
"""
response = post('/v1/offers', json=offer, token=request.user.token.id)
data = response.json()
return data
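# Hypothetical usage sketch: `request` is the Django/Horizon request object;
# the offer payload fields below are assumptions, the real schema is whatever
# schema.validate_provider_offer enforces.
#   offer = {"start_time": "...", "end_time": "...", "cost": "..."}
#   created = offer_create(request, offer)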
import bluesky.plan_stubs as bps
import bluesky.plans as bp
import bluesky.preprocessors as bpp
import numpy as np
import pytest
from ophyd.sim import SynAxis, hw
import nabs.plans as nbp
from nabs.simulators import validate_plan
hw = hw()
class LimitedMotor(SynAxis):
def check_value(self, value, **kwargs):
if np.abs(value) > 10:
raise ValueError("value out of bounds")
limit_motor = LimitedMotor(name='limit_motor', labels={'motors'})
@bpp.set_run_key_decorator("run_2")
@bpp.run_decorator(md={})
def sim_plan_inner(npts=2):
for j in range(npts):
yield from bps.mov(hw.motor1, j * 0.1 + 1,
hw.motor2, j * 0.2 - 2)
yield from bps.trigger_and_read([hw.motor1, hw.motor2,
hw.det2])
@bpp.set_run_key_decorator("run_1")
@bpp.run_decorator(md={})
def sim_plan_outer(npts):
for j in range(int(npts/2)):
yield from bps.mov(hw.motor, j * 0.2)
yield from bps.trigger_and_read([hw.motor, hw.det])
yield from sim_plan_inner(npts + 1)
for j in range(int(npts/2), npts):
yield from bps.mov(hw.motor, j * 0.2)
yield from bps.trigger_and_read([hw.motor, hw.det])
def bad_limits():
yield from bps.open_run()
yield from bps.sleep(1)
yield from bps.mv(limit_motor, 100)
yield from bps.sleep(1)
yield from bps.close_run()
def bad_nesting():
yield from bps.open_run()
yield from bp.count([])
yield from bps.close_run()
def bad_call():
yield from bps.open_run()
limit_motor.set(10)
yield from bps.close_run()
def bad_stage():
yield from bps.stage(hw.det)
@pytest.mark.parametrize(
'plan',
[
bad_limits(),
bad_nesting(),
bad_call(),
]
)
def test_bad_plans(plan):
success, _ = validate_plan(plan)
assert success is False
@pytest.mark.parametrize(
'plan',
[
sim_plan_outer(4),
bp.count([hw.det], num=2),
bp.scan([hw.det, hw.det2, hw.motor],
hw.motor, 0, 1, hw.motor2, 1, 20, 10),
nbp.daq_dscan([hw.det], hw.motor, 1, 0, 2, events=1)
]
)
def test_good_plans(plan, daq):
success, _ = validate_plan(plan)
assert success is True
def test_list_devices(client):
devices = client.devices()
assert len(devices) > 0
assert any(map(lambda device: device.serial == "emulator-5554", devices))
def test_version(client):
version = client.version()
assert type(version) == int
assert version != 0
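# Hypothetical conftest sketch: these tests assume a pytest fixture named
# `client` connected to a local adb server; the ppadb-style API below is an
# assumption, not part of this file.
#   import pytest
#   from ppadb.client import Client
#
#   @pytest.fixture
#   def client():
#       return Client(host="127.0.0.1", port=5037)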
import numpy as np
from gutfit import model, parameterlist
def matrix_diag3(d1,d2,d3):
return np.array([[d1, 0.0, 0.0], [0.0, d2, 0.0], [0.0, 0.0, d3]])
# Generic Rotations #
def matrix_rot23(th23):
return np.array([[1.0, 0.0 , 0.0],
[0.0, np.cos(th23), np.sin(th23)],
[0.0, -np.sin(th23), np.cos(th23)]])
def matrix_rot12(th12):
return np.array([[ np.cos(th12), np.sin(th12), 0.0],
[-np.sin(th12), np.cos(th12), 0.0],
[ 0.0, 0.0, 1.0]])
def matrix_rot13(th13, delta):
return np.array([[ np.cos(th13), 0.0, np.sin(th13) * np.exp(-1j * delta)],
[ 0.0 , 1.0, 0.0 ],
[-np.sin(th13)* np.exp(1j * delta), 0.0, np.cos(th13)]],
dtype=np.complex64)
def matrix_vckm(th12, th13, th23, delta):
return matrix_rot23(th23) @ matrix_rot13(th13, delta) @ matrix_rot12(th12)
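# Illustrative sanity check (the angles below are placeholders, not fitted
# values): the composed mixing matrix should be unitary up to float precision.
#   V = matrix_vckm(0.227, 0.0035, 0.042, 1.2)
#   np.allclose(V @ np.conjugate(V).T, np.eye(3), atol=1e-6)  # -> True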
# Phase Matrices #
def matrix_phase(a1, a2, a3):
return np.array([[np.exp(1j * a1), 0.0, 0.0],
[ 0.0, np.exp(1j * a2), 0.0],
[ 0.0, 0.0, np.exp(1j * a3)]],
dtype=np.complex64)
def matrix_Yd(a1, a2, a3, b1, b2, th12, th13, th23, delta, yd, ys, yb):
Pa = matrix_phase(a1, a2, a3)
Pb = matrix_phase(b1, b2, 0.0)
Vckm = matrix_vckm(th12, th13, th23, delta)
Yddiag = matrix_diag3(yd, ys, yb)
Yukd = Pa @ Vckm @ Yddiag @ Pb @ np.transpose(Vckm) @ Pa
return Yukd
class Type1And2SeeSaw(model.Model):
def __init__(self):
params = [
"generic_quark_phase_a1",
"generic_quark_phase_a2",
"generic_quark_phase_a3",
"generic_quark_phase_b1",
"generic_quark_phase_b2",
"data_quark_th12",
"data_quark_th13",
"data_quark_th23",
"data_quark_delta",
"data_quark_yu",
"data_quark_yc",
"data_quark_yt",
"data_quark_yd",
"data_quark_ys",
"data_quark_yb",
"model1_mL",
"model1_mR",
"model1_r1",
"model1_Rer2",
"model1_Imr2"
]
super().__init__(params)
@property
def val(self):
return np.abs(
self.MnuTheory(
self.generic_quark_phase_a1,
self.generic_quark_phase_a2,
self.generic_quark_phase_a3,
self.generic_quark_phase_b1,
self.generic_quark_phase_b2,
self.data_quark_th12,
self.data_quark_th13,
self.data_quark_th23,
self.data_quark_delta,
self.data_quark_yu,
self.data_quark_yc,
self.data_quark_yt,
self.data_quark_yd,
self.data_quark_ys,
self.data_quark_yb,
self.model1_mL,
self.model1_mR,
self.model1_r1,
self.model1_Rer2,
self.model1_Imr2
)
)
def MnuTheory(self, a1, a2, a3, b1, b2, th12q, th13q, th23q, deltaq, yu, yc, yt, yd, ys, yb, mL, mR, r1, Rer2, Imr2):
Yd = matrix_Yd(a1, a2, a3, b1, b2, th12q, th13q, th23q, deltaq, yd, ys, yb)
Yu = matrix_diag3(yu, yc, yt)
r2 = Rer2 + 1j * Imr2
type1p1 = 8.0 * (r2 - 3.0)/(r2-1.0) * Yu
type1p2 = -16.0 /(r1 * (r2 - 1.0)) * Yd
type1p3 = (r1 * (r2 - 1.0))/r2 * Yu @ np.linalg.inv(r1 * Yu - Yd) @ Yu
type1 = mR * (type1p1 + type1p2 + type1p3)
type2p1 = Yu / (r2 - 1)
type2p2 = -Yd / (r1 * (r2 - 1))
type2 = mL * (type2p1 + type2p2)
return type1 + type2
# def MnuTheory(self, a1, a2, a3, b1, b2, th12q, th13q, th23q, deltaq, yu, yc, yt, yd, ys, yb, mL, mR, r1, Rer2, Imr2):
# Yd = matrix_Yd(a1, a2, a3, b1, b2, th12q, th13q, th23q, deltaq, yd, ys, yb)
# Yu = matrix_diag3(yu, yc, yt)
# r2 = Rer2 + 1j * Imr2
# type1p1 = 8.0 * (r2 - 3.0)/(r2-1.0) * Yu
# type1p2 = -16.0 /(r1 * (r2 - 1.0)) * Yd
# type1p3 = (r1 * (r2 - 1.0))/r2 * Yu @ np.linalg.inv(r1 * Yu - Yd) @ Yu
# type1 = mR * (type1p1 + type1p2 + type1p3)
# type2p1 = Yu / (r2 - 1)
# type2p2 = -Yd / (r1 * (r2 - 1))
# type2 = (type2p1 + type2p2)
# return (type1/mL) + type2
if __name__=="__main__":
E = Type1And2SeeSaw()
PL = parameterlist.ParameterList.fromConfigFile("examples/param_card.dat")
from IPython import embed
embed()
E(PL())
import time
t0 = time.time()
for _ in range(1000000):
E(PL())
print(time.time() - t0)
import argparse
import json
import logging
import random
import numpy as np
import torch
from decouple import config
from tqdm import tqdm
from GPT2.config import GPT2Config
from GPT2.encoder import get_encoder
from GPT2.model import GPT2LMHeadModel
from GPT2.utils import load_weight
# import os
# import torch.nn.functional as F
# from array import array
parser = argparse.ArgumentParser(description="Validity Tensor Estimation")
parser.add_argument(
"-gs",
default="data/groundStrings.json",
type=str,
help="sets the input grond string file",
)
parser.add_argument(
"-pt",
default="data/perterbationTensor.json",
type=str,
help="sets the input perterbation tensor file.",
)
parser.add_argument(
"-gvi",
default="data/groundValidityTensor.json",
type=str,
help="sets the input ground validity tensor file.",
)
parser.add_argument(
"-gvo",
default="data/groundValidityTensor.json",
type=str,
help="sets the output ground validity tensor file.",
)
parser.add_argument(
"-vo",
default="data/validityTensor.json",
type=str,
help="sets the output validity tensor file.",
)
parser.add_argument(
"-d",
type=str,
help="Sets the device to use.\n"
"Choices: 'gpu' for GPU, 'cpu' for CPU\n"
"(If left blank defaults to 'DEVICE' entry in .env file.)\n",
)
parser.add_argument(
"-checkpoint",
default=None,
type=str,
help="Begin again from end of partial validity tensor file.\n"
"Accepts: file path to .json containing validity tensor.\n",
)
args = vars(parser.parse_args())
logging.basicConfig(
filename="logs/validtyTensor.log",
level=logging.DEBUG,
format="[%(asctime)s|%(name)s|make_validity_tensor.py|%(levelname)s] %(message)s",
)
if args["d"]:
device_choice = args["d"]
else:
device_choice = config("DEVICE")
print("\nDEVICE:", device_choice, "\n")
if device_choice == "gpu" and not torch.cuda.is_available():
print("CUDA unavailable, defaulting to CPU.")
device_choice = "cpu"
if device_choice == "gpu":
print("gpu accellerated")
else:
print("cpu bound")
state_dict = torch.load(
config("MODEL_LOCATION"),
map_location="cpu"
if (not torch.cuda.is_available() or device_choice == "cpu")
else None,
)
print("\nValidity Tensor Estimation\n")
# -- Setting up PyTorch Information -- #
seed = random.randint(0, 2147483647)
np.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed(seed)
# device = torch.device("cpu")
device = torch.device(
"cuda" if (torch.cuda.is_available() and device_choice == "gpu") else "cpu"
)
known_configurations = {
"s_ai": GPT2Config(),
"xl_ai": GPT2Config(
vocab_size_or_config_json_file=50257,
n_positions=1024,
n_ctx=1024,
n_embd=1600,
n_layer=48,
n_head=25,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
),
}
# -- Load Model -- #
gpt2_config = known_configurations[config("MODEL_NAME")]
model = GPT2LMHeadModel(gpt2_config)
model = load_weight(model, state_dict)
model.share_memory()
model.to(device)
model.eval()
# -- serving BrainSqueeze resources. --#
def tokenize(text: str):
enc = get_encoder()
tokens = enc.encode(text)
return tokens
def detokenize(tokens: iter):
enc = get_encoder()
text = enc.decode(tokens)
return text
def firstMismatch(tokensA: iter, tokensB: iter):
# assumes tokensA is shorter than, or as long as, tokensB.
for i in range(len(tokensA)):
if tokensA[i] != tokensB[i]:
return i
return None
def firstMismatchInclusive(tokensA: iter, tokensB: iter):
# makes no assumptions about the lengths of tokensA and tokensB.
for i in range(min(len(tokensA), len(tokensB))):
if tokensA[i] != tokensB[i]:
return i
return min(len(tokensA), len(tokensB))
def predictedDistribution(
model=model,
start_token=50256,
batch_size=1,
tokens=None,
temperature: float = None,
top_k=1,
device=device,
):
"""returns a probability distribution for the next byte-pair encoding"""
if tokens is None:
context = torch.full(
(batch_size, 1), start_token, device=device, dtype=torch.long
)
elif type(tokens) is torch.Tensor:
context = tokens.unsqueeze(0).repeat(batch_size, 1)
else:
context = (
torch.tensor(tokens, device=device, dtype=torch.long)
.unsqueeze(0)
.repeat(batch_size, 1)
)
prev = context
past = None
with torch.no_grad():
logits, past = model(prev, past=past)
logits = logits[:, -1, :]
return logits[0]
def errorSeries(tokens: list, pbar: tqdm):
radii = []
# get first radius (special case)
logits = predictedDistribution(start_token=50256) # 50256 => <|endoftext|>
prob = logits[tokens[0]]
clamped = torch.clamp(logits, min=prob, max=None)
clamped.add_(-prob)
radius = torch.count_nonzero(clamped).item()
radii.append(radius)
if pbar is not None:
pbar.update(1)
# get all following radii
for i in range(1, len(tokens)):
logits = predictedDistribution(tokens=tokens[:i])
prob = logits[tokens[i]]
clamped = torch.clamp(logits, min=prob, max=None)
clamped.add_(-prob)
radius = torch.count_nonzero(clamped).item()
radii.append(radius)
if pbar is not None:
pbar.update(1)
return radii
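# Note: each "radius" above counts the vocabulary entries whose logit strictly
# exceeds the ground token's logit, i.e. the token's rank under the model;
# a radius of 0 means the model's top prediction matched the ground token.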
def partialErrorSeries(tokens: list, start: int):
def getRadius(logits, token):
prob = logits[token]
clamped = torch.clamp(logits, min=prob, max=None)
clamped.add_(-prob)
radius = torch.count_nonzero(clamped).item()
return radius
radii = []
if start == 0:
# get first radius (special case)
logits = predictedDistribution(start_token=50256) # 50256 => <|endoftext|>
radius = getRadius(logits, tokens[0])
radii.append(radius)
# then get all following radii
for i in range(1, len(tokens)):
logits = predictedDistribution(tokens=tokens[:i])
radius = getRadius(logits, tokens[i])
radii.append(radius)
return radii
else:
for i in range(start, len(tokens)):
logits = predictedDistribution(tokens=tokens[:i])
radius = getRadius(logits, tokens[i])
radii.append(radius)
return radii
def calculateGroundValidityTensor(groundStrings: iter):
gvBar = tqdm(total=len(groundStrings), desc="GroundValidity", position=0)
gvTen = []
coder = get_encoder()
for gs in groundStrings:
tokens = coder.encode(gs)
radii = errorSeries(tokens, None)
gvTen.append(radii)
gvBar.update()
return gvTen
def calculateValidityTensor(
groundTokens: iter,
groundValidityTensor: iter,
perterbationTensor: iter,
checkpoint: str = None,
):
validityTensor = []
totalBar = tqdm(total=len(perterbationTensor), desc="Total", position=0)
symbolBar = tqdm(total=len(perterbationTensor[0][1]), desc="TBD", position=1)
vectorBar = tqdm(total=len(perterbationTensor[0][1][0]), desc="Vector", position=2)
if checkpoint:
with open(checkpoint, "r") as f:
validityTensor = json.load(f)
# don't recalculate any symbols that have already been done
already = len(validityTensor)
perterbationTensor = perterbationTensor[already::]
totalBar.update(already)
coder = get_encoder()
for sym, plane in perterbationTensor:
logging.info("Started Symbol: " + sym)
symbolBar.reset()
symbolBar.set_description(sym)
vPlane = []
for i, vector in enumerate(plane):
vVector = []
vectorBar.reset(total=len(vector))
for pString in vector:
# tokenize pString
pTokens = coder.encode(pString)
# locate departure form ground tokens
departure = firstMismatch(pTokens, groundTokens[i])
if departure is not None:
# sum error up to agreement with groundTokens
agreement = sum(groundValidityTensor[i][:departure])
# calculate validity of the perturbed string from departure onward
departureValidity = partialErrorSeries(pTokens, departure)
# calculate total validity
validity = agreement + sum(departureValidity)
# compare to ground validity
validity_delta = (
sum(groundValidityTensor[i]) - validity
) # lower validity is better
else:
validity_delta = 0
vVector.append(validity_delta)
vectorBar.update()
vPlane.append(vVector)
symbolBar.update()
validityTensor.append((sym, vPlane))
totalBar.update()
logging.info("Finished Symbol: " + sym)
with open(args["vo"], "w") as f: # save checkpoint
json.dump(validityTensor, f)
vectorBar.close()
symbolBar.close()
totalBar.close()
return validityTensor
if __name__ == "__main__":
# with open(args["gs"], "r") as f:
# groundStrings = json.load(f)
# gvTen = calculateGroundValidityTensor(groundStrings)
# with open(args["gvo"], "w") as f:
# json.dump(gvTen, f)
with open(args["gs"], "r") as f:
groundStrings = json.load(f)
groundTokens = []
coder = get_encoder()
for gs in groundStrings:
groundTokens.append(coder.encode(gs))
with open(args["gvi"], "r") as f:
groundValidity = json.load(f)
with open(args["pt"], "r") as f:
perterbationTensor = json.load(f)
vt = calculateValidityTensor(
groundTokens, groundValidity, perterbationTensor, checkpoint=args["checkpoint"]
)
print("\n\n\n### --- SUCCESS! --- ###\n\n\n")
#
# PySNMP MIB module SUN-T300-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/SUN-T300-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:04:28 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
ObjectIdentity, Bits, iso, Counter32, ModuleIdentity, NotificationType, Counter64, IpAddress, enterprises, NotificationType, MibIdentifier, Unsigned32, Gauge32, TimeTicks, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Bits", "iso", "Counter32", "ModuleIdentity", "NotificationType", "Counter64", "IpAddress", "enterprises", "NotificationType", "MibIdentifier", "Unsigned32", "Gauge32", "TimeTicks", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
t300 = ModuleIdentity((1, 3, 6, 1, 4, 1, 42, 2, 28, 2))
if mibBuilder.loadTexts: t300.setLastUpdated('0012140000Z')
if mibBuilder.loadTexts: t300.setOrganization('SUN MICROSYSTEMS INCORPORATED')
sun = MibIdentifier((1, 3, 6, 1, 4, 1, 42))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2))
storage_subsystem = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28)).setLabel("storage-subsystem")
t300Reg = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 1))
t300Purple1 = ObjectIdentity((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 1, 1))
if mibBuilder.loadTexts: t300Purple1.setStatus('current')
t300Objs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2))
t300SystemObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1))
t300UnitObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2))
t300FruObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3))
t300VolumeObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4))
t300PortObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5))
t300AttachObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6))
t300LoopObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7))
t300LogObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8))
t300OndgObjs = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9))
t300Events = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 3))
t300EventsV2 = MibIdentifier((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 3, 0))
sysId = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysId.setStatus('mandatory')
sysVendor = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysVendor.setStatus('mandatory')
sysModel = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysModel.setStatus('mandatory')
sysRevision = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysRevision.setStatus('mandatory')
sysStripeUnitSize = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysStripeUnitSize.setStatus('mandatory')
sysCacheMode = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("disabled", 1), ("writeThrough", 2), ("writeBehind", 3), ("auto", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheMode.setStatus('mandatory')
sysCacheMirror = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheMirror.setStatus('mandatory')
sysAutoDisable = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("disableOnly", 2), ("disableRecon", 3), ("reconOnly", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysAutoDisable.setStatus('obsolete')
sysMpSupport = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("readWrite", 2), ("mpxio", 3), ("std", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysMpSupport.setStatus('mandatory')
sysReadAhead = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysReadAhead.setStatus('mandatory')
sysReconRate = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("low", 1), ("medium", 2), ("high", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysReconRate.setStatus('mandatory')
sysOndgMode = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("passive", 2), ("active", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysOndgMode.setStatus('mandatory')
sysOndgTimeslice = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 13), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysOndgTimeslice.setStatus('mandatory')
sysIdleDiskTimeout = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 14), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysIdleDiskTimeout.setStatus('obsolete')
sysFruRemovalShutdown = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 15), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysFruRemovalShutdown.setStatus('mandatory')
sysBootMode = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("none", 1), ("auto", 2), ("tftp", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysBootMode.setStatus('mandatory')
sysBootDelay = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 17), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysBootDelay.setStatus('mandatory')
sysSpinDelay = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 18), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysSpinDelay.setStatus('obsolete')
sysTftpHost = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 19), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysTftpHost.setStatus('mandatory')
sysTftpFile = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 20), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysTftpFile.setStatus('mandatory')
sysIpAddr = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 21), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysIpAddr.setStatus('mandatory')
sysSubNet = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 22), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysSubNet.setStatus('mandatory')
sysGateway = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 23), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysGateway.setStatus('mandatory')
sysWriteRequests = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysWriteRequests.setStatus('mandatory')
sysReadRequests = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysReadRequests.setStatus('mandatory')
sysBlocksWritten = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysBlocksWritten.setStatus('mandatory')
sysBlocksRead = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 27), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysBlocksRead.setStatus('mandatory')
sysCacheWriteHits = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 28), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheWriteHits.setStatus('mandatory')
sysCacheWriteMisses = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 29), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheWriteMisses.setStatus('mandatory')
sysCacheReadHits = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 30), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheReadHits.setStatus('mandatory')
sysCacheReadMisses = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 31), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheReadMisses.setStatus('mandatory')
sysCacheRmwFlushes = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 32), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheRmwFlushes.setStatus('mandatory')
sysCacheReconFlushes = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 33), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheReconFlushes.setStatus('mandatory')
sysCacheStripeFlushes = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 34), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCacheStripeFlushes.setStatus('mandatory')
sysTimezone = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 35), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysTimezone.setStatus('mandatory')
sysDate = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 36), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysDate.setStatus('mandatory')
sysTime = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 37), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysTime.setStatus('mandatory')
sysRootSession = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 38), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysRootSession.setStatus('obsolete')
sysGuestSession = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 39), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysGuestSession.setStatus('obsolete')
sysLastMessage = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 40), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysLastMessage.setStatus('mandatory')
sysRarpEnabled = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 41), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysRarpEnabled.setStatus('mandatory')
sysLoop1Split = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 42), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("auto", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysLoop1Split.setStatus('mandatory')
sysLastRestart = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 43), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysLastRestart.setStatus('mandatory')
sysCtime = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 44), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysCtime.setStatus('mandatory')
sysHasVolumes = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 45), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: sysHasVolumes.setStatus('mandatory')
unitCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitCount.setStatus('mandatory')
unitTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2), )
if mibBuilder.loadTexts: unitTable.setStatus('mandatory')
unitEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"))
if mibBuilder.loadTexts: unitEntry.setStatus('mandatory')
unitIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitIndex.setStatus('mandatory')
unitId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitId.setStatus('mandatory')
unitType = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("controller", 1), ("expansion", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitType.setStatus('mandatory')
unitStandby = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 2, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: unitStandby.setStatus('mandatory')
fruCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCount.setStatus('mandatory')
fruTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2), )
if mibBuilder.loadTexts: fruTable.setStatus('mandatory')
fruEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruEntry.setStatus('mandatory')
fruIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruIndex.setStatus('mandatory')
fruId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruId.setStatus('mandatory')
fruType = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("diskDrive", 1), ("controllerCard", 2), ("loopCard", 3), ("powerUnit", 4), ("midplane", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruType.setStatus('mandatory')
fruStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notInstalled", 1), ("fault", 2), ("ready", 3), ("offline", 4), ("booting", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruStatus.setStatus('mandatory')
fruState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2), ("substituted", 3), ("missing", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruState.setStatus('mandatory')
fruVendor = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruVendor.setStatus('mandatory')
fruModel = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruModel.setStatus('mandatory')
fruRevision = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 8), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruRevision.setStatus('mandatory')
fruSerialNo = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruSerialNo.setStatus('mandatory')
fruErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 2, 1, 10), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruErrors.setStatus('mandatory')
fruDiskCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskCount.setStatus('mandatory')
fruDiskTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4), )
if mibBuilder.loadTexts: fruDiskTable.setStatus('mandatory')
fruDiskEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruDiskEntry.setStatus('mandatory')
fruDiskRole = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("unassigned", 1), ("dataDisk", 2), ("standbyDisk", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskRole.setStatus('mandatory')
fruDiskPort1State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ready", 1), ("notReady", 2), ("bypass", 3), ("unknown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskPort1State.setStatus('mandatory')
fruDiskPort2State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("ready", 1), ("notReady", 2), ("bypass", 3), ("unknown", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskPort2State.setStatus('mandatory')
fruDiskCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskCapacity.setStatus('mandatory')
fruDiskStatusCode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskStatusCode.setStatus('mandatory')
fruDiskVolName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskVolName.setStatus('mandatory')
fruDiskTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 4, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruDiskTemp.setStatus('mandatory')
fruCtlrCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrCount.setStatus('mandatory')
fruCtlrTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6), )
if mibBuilder.loadTexts: fruCtlrTable.setStatus('mandatory')
fruCtlrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruCtlrEntry.setStatus('mandatory')
fruCtlrCpuDesc = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrCpuDesc.setStatus('mandatory')
fruCtlrRole = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("master", 1), ("alternateMaster", 2), ("slave", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrRole.setStatus('mandatory')
fruCtlrPartnerId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrPartnerId.setStatus('mandatory')
fruCtlrCtState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9, 10))).clone(namedValues=NamedValues(("expansionUnit", 1), ("booting", 2), ("online", 3), ("disabled", 4), ("disabling", 5), ("reset", 6), ("resetting", 7), ("reconfig", 8), ("hotPlug", 9), ("virtual", 10)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrCtState.setStatus('mandatory')
fruCtlrCacheSize = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrCacheSize.setStatus('mandatory')
fruCtlrTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrTemp.setStatus('mandatory')
fruCtlrMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 7), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrMdate.setStatus('mandatory')
fruCtlrConsoleBaud = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 6, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruCtlrConsoleBaud.setStatus('mandatory')
fruLoopCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopCount.setStatus('mandatory')
fruLoopTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8), )
if mibBuilder.loadTexts: fruLoopTable.setStatus('mandatory')
fruLoopEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruLoopEntry.setStatus('mandatory')
fruLoopMode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("master", 1), ("slave", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopMode.setStatus('mandatory')
fruLoopTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopTemp.setStatus('mandatory')
fruLoopCable1State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notInstalled", 1), ("installed", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopCable1State.setStatus('mandatory')
fruLoopCable2State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("notInstalled", 1), ("installed", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopCable2State.setStatus('mandatory')
fruLoopMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 8, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruLoopMdate.setStatus('mandatory')
fruPowerCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerCount.setStatus('mandatory')
fruPowerTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10), )
if mibBuilder.loadTexts: fruPowerTable.setStatus('mandatory')
fruPowerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruPowerEntry.setStatus('mandatory')
fruPowerPowOutput = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("off", 1), ("normal", 2), ("fault", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerPowOutput.setStatus('mandatory')
fruPowerPowSource = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("line", 1), ("battery", 2), ("unknown", 3), ("none", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerPowSource.setStatus('mandatory')
fruPowerPowTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("normal", 1), ("overTemp", 2), ("unknown", 3), ("none", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerPowTemp.setStatus('mandatory')
fruPowerFan1State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("fault", 2), ("missing", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerFan1State.setStatus('mandatory')
fruPowerFan2State = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("normal", 1), ("fault", 2), ("missing", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerFan2State.setStatus('mandatory')
fruPowerBatState = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("notInstalled", 1), ("normal", 2), ("fault", 3), ("refreshing", 4), ("unknown", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerBatState.setStatus('mandatory')
fruPowerBatLife = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerBatLife.setStatus('mandatory')
fruPowerBatUsed = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerBatUsed.setStatus('mandatory')
fruPowerPowMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerPowMdate.setStatus('mandatory')
fruPowerBatMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 10, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruPowerBatMdate.setStatus('mandatory')
fruMidplaneCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 11), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruMidplaneCount.setStatus('mandatory')
fruMidplaneTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 12), )
if mibBuilder.loadTexts: fruMidplaneTable.setStatus('mandatory')
fruMidplaneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 12, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "fruIndex"))
if mibBuilder.loadTexts: fruMidplaneEntry.setStatus('mandatory')
fruMidplaneMdate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 3, 12, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: fruMidplaneMdate.setStatus('mandatory')
volCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCount.setStatus('mandatory')
volTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2), )
if mibBuilder.loadTexts: volTable.setStatus('mandatory')
volEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "volIndex"))
if mibBuilder.loadTexts: volEntry.setStatus('mandatory')
volIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volIndex.setStatus('mandatory')
volId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volId.setStatus('mandatory')
volName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volName.setStatus('mandatory')
volWWN = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volWWN.setStatus('mandatory')
volStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("deleted", 1), ("uninitialized", 2), ("unmounted", 3), ("mounted", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volStatus.setStatus('mandatory')
volCacheMode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("disabled", 1), ("writeThrough", 2), ("writeBehind", 3), ("auto", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheMode.setStatus('mandatory')
volCacheMirror = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("off", 1), ("on", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheMirror.setStatus('mandatory')
volCapacity = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 8), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCapacity.setStatus('mandatory')
volArrayWidth = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 9), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volArrayWidth.setStatus('mandatory')
volRaidLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("raid0", 1), ("raid1", 2), ("raid5", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volRaidLevel.setStatus('mandatory')
volWriteRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volWriteRequests.setStatus('mandatory')
volReadRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volReadRequests.setStatus('mandatory')
volBlocksWritten = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volBlocksWritten.setStatus('mandatory')
volBlocksRead = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volBlocksRead.setStatus('mandatory')
volSoftErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volSoftErrors.setStatus('mandatory')
volFirmErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 16), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volFirmErrors.setStatus('mandatory')
volHardErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volHardErrors.setStatus('mandatory')
volCacheWriteHits = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheWriteHits.setStatus('mandatory')
volCacheWriteMisses = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheWriteMisses.setStatus('mandatory')
volCacheReadHits = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheReadHits.setStatus('mandatory')
volCacheReadMisses = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheReadMisses.setStatus('mandatory')
volCacheRmwFlushes = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheRmwFlushes.setStatus('mandatory')
volCacheReconFlushes = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheReconFlushes.setStatus('mandatory')
volCacheStripeFlushes = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volCacheStripeFlushes.setStatus('mandatory')
volDisabledDisk = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 25), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volDisabledDisk.setStatus('mandatory')
volSubstitutedDisk = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 26), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volSubstitutedDisk.setStatus('mandatory')
volOper = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 27), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("none", 1), ("reconstructing", 2), ("reconstructingToStandby", 3), ("copyingFromStandby", 4), ("copyingToStandby", 5), ("initializing", 6), ("verifying", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: volOper.setStatus('mandatory')
volOperProgress = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 28), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volOperProgress.setStatus('mandatory')
volInitRate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 29), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volInitRate.setStatus('mandatory')
volVerifyRate = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 4, 2, 1, 30), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: volVerifyRate.setStatus('mandatory')
portCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portCount.setStatus('mandatory')
portTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2), )
if mibBuilder.loadTexts: portTable.setStatus('mandatory')
portEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "portIndex"))
if mibBuilder.loadTexts: portEntry.setStatus('mandatory')
portIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portIndex.setStatus('mandatory')
portId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portId.setStatus('mandatory')
portType = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ultraScsi", 1), ("fibreChannel", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portType.setStatus('mandatory')
portFruId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFruId.setStatus('mandatory')
portWriteRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portWriteRequests.setStatus('mandatory')
portReadRequests = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portReadRequests.setStatus('mandatory')
portBlocksWritten = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portBlocksWritten.setStatus('mandatory')
portBlocksRead = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portBlocksRead.setStatus('mandatory')
portSunHost = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portSunHost.setStatus('mandatory')
portWWN = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 10), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 136))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portWWN.setStatus('mandatory')
portStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("offline", 1), ("online", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portStatus.setStatus('mandatory')
portErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 2, 1, 12), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portErrors.setStatus('mandatory')
portFibreCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFibreCount.setStatus('mandatory')
portFibreTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 4), )
if mibBuilder.loadTexts: portFibreTable.setStatus('mandatory')
portFibreEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 4, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "portIndex"))
if mibBuilder.loadTexts: portFibreEntry.setStatus('mandatory')
portFibreAlpaMode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 4, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("hard", 1), ("soft", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFibreAlpaMode.setStatus('mandatory')
portFibreAlpa = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 5, 4, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: portFibreAlpa.setStatus('mandatory')
attachCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachCount.setStatus('mandatory')
attachTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2), )
if mibBuilder.loadTexts: attachTable.setStatus('mandatory')
attachEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "portIndex"), (0, "SUN-T300-MIB", "attachIndex"))
if mibBuilder.loadTexts: attachEntry.setStatus('mandatory')
attachIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachIndex.setStatus('mandatory')
attachLun = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachLun.setStatus('mandatory')
attachMode = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("primary", 1), ("secondary", 2), ("failover", 3)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachMode.setStatus('mandatory')
attachVolId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachVolId.setStatus('mandatory')
attachVolName = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 5), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachVolName.setStatus('mandatory')
attachVolOwner = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 6, 2, 1, 6), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: attachVolOwner.setStatus('mandatory')
loopCount = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopCount.setStatus('mandatory')
loopTable = MibTable((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2), )
if mibBuilder.loadTexts: loopTable.setStatus('mandatory')
loopEntry = MibTableRow((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1), ).setIndexNames((0, "SUN-T300-MIB", "unitIndex"), (0, "SUN-T300-MIB", "loopIndex"))
if mibBuilder.loadTexts: loopEntry.setStatus('mandatory')
loopIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopIndex.setStatus('mandatory')
loopId = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopId.setStatus('mandatory')
loopStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("available", 1), ("reserved", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopStatus.setStatus('mandatory')
loopMux = MibTableColumn((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 7, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("isolated", 1), ("top", 2), ("bottom", 3), ("middle", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: loopMux.setStatus('mandatory')
logStatus = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("disabled", 1), ("enabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: logStatus.setStatus('mandatory')
logTo = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: logTo.setStatus('mandatory')
logFile = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 80))).setMaxAccess("readonly")
if mibBuilder.loadTexts: logFile.setStatus('mandatory')
logLevel = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("none-0", 1), ("error-1", 2), ("warning-2", 3), ("notice-3", 4), ("all-4", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: logLevel.setStatus('mandatory')
logPort = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 8, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: logPort.setStatus('mandatory')
ondgOper = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("test", 1), ("fastTest", 2), ("find", 3), ("fastFind", 4), ("healthCheck", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgOper.setStatus('mandatory')
ondgOperPending = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgOperPending.setStatus('mandatory')
ondgOperProgress = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgOperProgress.setStatus('mandatory')
ondgError = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 4), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 40))).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgError.setStatus('mandatory')
ondgId = MibScalar((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 9, 5), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: ondgId.setStatus('mandatory')
sysMessage = NotificationType((1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 3) + (0,1)).setObjects(("SUN-T300-MIB", "sysLastMessage"))
mibBuilder.exportSymbols("SUN-T300-MIB", fruPowerPowTemp=fruPowerPowTemp, t300SystemObjs=t300SystemObjs, volRaidLevel=volRaidLevel, portBlocksWritten=portBlocksWritten, fruLoopEntry=fruLoopEntry, fruTable=fruTable, ondgOperProgress=ondgOperProgress, portFruId=portFruId, logFile=logFile, portIndex=portIndex, fruLoopTemp=fruLoopTemp, fruDiskStatusCode=fruDiskStatusCode, fruPowerBatState=fruPowerBatState, sysGateway=sysGateway, sysBlocksWritten=sysBlocksWritten, portCount=portCount, loopIndex=loopIndex, t300LoopObjs=t300LoopObjs, sysStripeUnitSize=sysStripeUnitSize, portTable=portTable, sysOndgTimeslice=sysOndgTimeslice, sysTftpFile=sysTftpFile, portFibreAlpa=portFibreAlpa, sysFruRemovalShutdown=sysFruRemovalShutdown, unitType=unitType, fruDiskPort1State=fruDiskPort1State, products=products, unitCount=unitCount, fruVendor=fruVendor, fruCtlrCpuDesc=fruCtlrCpuDesc, fruPowerFan1State=fruPowerFan1State, t300FruObjs=t300FruObjs, sysGuestSession=sysGuestSession, volArrayWidth=volArrayWidth, portBlocksRead=portBlocksRead, fruId=fruId, portId=portId, t300=t300, volReadRequests=volReadRequests, unitEntry=unitEntry, volCount=volCount, volCacheRmwFlushes=volCacheRmwFlushes, ondgOper=ondgOper, portEntry=portEntry, volCacheStripeFlushes=volCacheStripeFlushes, volCacheMode=volCacheMode, sysReadAhead=sysReadAhead, sysIpAddr=sysIpAddr, fruErrors=fruErrors, volEntry=volEntry, sysDate=sysDate, volCapacity=volCapacity, volBlocksRead=volBlocksRead, sysCacheMode=sysCacheMode, fruCtlrRole=fruCtlrRole, fruMidplaneTable=fruMidplaneTable, fruPowerCount=fruPowerCount, fruMidplaneMdate=fruMidplaneMdate, sysWriteRequests=sysWriteRequests, volCacheWriteHits=volCacheWriteHits, fruDiskCapacity=fruDiskCapacity, attachVolName=attachVolName, volSubstitutedDisk=volSubstitutedDisk, t300EventsV2=t300EventsV2, portErrors=portErrors, sysSpinDelay=sysSpinDelay, fruIndex=fruIndex, fruCount=fruCount, sysAutoDisable=sysAutoDisable, t300Objs=t300Objs, sysLastRestart=sysLastRestart, fruPowerEntry=fruPowerEntry, portReadRequests=portReadRequests, sysBootMode=sysBootMode, fruModel=fruModel, PYSNMP_MODULE_ID=t300, storage_subsystem=storage_subsystem, volFirmErrors=volFirmErrors, unitId=unitId, sysHasVolumes=sysHasVolumes, portStatus=portStatus, fruSerialNo=fruSerialNo, t300UnitObjs=t300UnitObjs, loopStatus=loopStatus, fruLoopCable2State=fruLoopCable2State, fruPowerBatLife=fruPowerBatLife, sysLastMessage=sysLastMessage, fruCtlrTable=fruCtlrTable, fruMidplaneCount=fruMidplaneCount, sysCacheWriteHits=sysCacheWriteHits, fruCtlrConsoleBaud=fruCtlrConsoleBaud, t300Reg=t300Reg, volCacheReadHits=volCacheReadHits, attachIndex=attachIndex, sysSubNet=sysSubNet, fruDiskRole=fruDiskRole, sysModel=sysModel, volStatus=volStatus, volCacheReadMisses=volCacheReadMisses, attachVolId=attachVolId, sysRevision=sysRevision, fruCtlrTemp=fruCtlrTemp, fruPowerBatMdate=fruPowerBatMdate, sysLoop1Split=sysLoop1Split, volOper=volOper, portType=portType, attachMode=attachMode, logPort=logPort, t300LogObjs=t300LogObjs, unitIndex=unitIndex, portFibreCount=portFibreCount, sysReadRequests=sysReadRequests, volId=volId, portFibreEntry=portFibreEntry, sysVendor=sysVendor, volSoftErrors=volSoftErrors, fruPowerFan2State=fruPowerFan2State, sysBlocksRead=sysBlocksRead, volTable=volTable, sysId=sysId, attachEntry=attachEntry, sysRootSession=sysRootSession, ondgId=ondgId, sysCacheWriteMisses=sysCacheWriteMisses, attachLun=attachLun, attachVolOwner=attachVolOwner, sysTimezone=sysTimezone, sysCacheReconFlushes=sysCacheReconFlushes, attachTable=attachTable, t300Events=t300Events, 
logLevel=logLevel, sysCacheMirror=sysCacheMirror, volWriteRequests=volWriteRequests, t300OndgObjs=t300OndgObjs, sysCacheStripeFlushes=sysCacheStripeFlushes, portFibreAlpaMode=portFibreAlpaMode, logStatus=logStatus, t300AttachObjs=t300AttachObjs, fruCtlrCount=fruCtlrCount, loopTable=loopTable, volDisabledDisk=volDisabledDisk, fruEntry=fruEntry, sysMessage=sysMessage, fruDiskEntry=fruDiskEntry, portWWN=portWWN, volVerifyRate=volVerifyRate, volName=volName, sun=sun, sysReconRate=sysReconRate, fruDiskPort2State=fruDiskPort2State, fruCtlrCtState=fruCtlrCtState, fruPowerPowOutput=fruPowerPowOutput, fruCtlrPartnerId=fruCtlrPartnerId, fruStatus=fruStatus, fruLoopTable=fruLoopTable, fruPowerPowMdate=fruPowerPowMdate, sysCacheReadMisses=sysCacheReadMisses, fruLoopMdate=fruLoopMdate, portFibreTable=portFibreTable, ondgOperPending=ondgOperPending, fruPowerTable=fruPowerTable, sysCacheReadHits=sysCacheReadHits, logTo=logTo, loopEntry=loopEntry, volCacheWriteMisses=volCacheWriteMisses, fruType=fruType, fruDiskTemp=fruDiskTemp, volCacheReconFlushes=volCacheReconFlushes, volInitRate=volInitRate, attachCount=attachCount, fruPowerBatUsed=fruPowerBatUsed, fruCtlrEntry=fruCtlrEntry, ondgError=ondgError, t300VolumeObjs=t300VolumeObjs, sysCtime=sysCtime, loopId=loopId, fruDiskCount=fruDiskCount, sysOndgMode=sysOndgMode, volCacheMirror=volCacheMirror, portWriteRequests=portWriteRequests, sysCacheRmwFlushes=sysCacheRmwFlushes, sysTime=sysTime, fruLoopMode=fruLoopMode, loopMux=loopMux, fruDiskVolName=fruDiskVolName, volIndex=volIndex, sysTftpHost=sysTftpHost, fruState=fruState, fruCtlrCacheSize=fruCtlrCacheSize, loopCount=loopCount, fruPowerPowSource=fruPowerPowSource, sysIdleDiskTimeout=sysIdleDiskTimeout, sysBootDelay=sysBootDelay, volBlocksWritten=volBlocksWritten, fruRevision=fruRevision, unitStandby=unitStandby, fruLoopCount=fruLoopCount, volHardErrors=volHardErrors, fruDiskTable=fruDiskTable, fruLoopCable1State=fruLoopCable1State, fruCtlrMdate=fruCtlrMdate, sysRarpEnabled=sysRarpEnabled, fruMidplaneEntry=fruMidplaneEntry, t300Purple1=t300Purple1, unitTable=unitTable, volWWN=volWWN, sysMpSupport=sysMpSupport, volOperProgress=volOperProgress, t300PortObjs=t300PortObjs, portSunHost=portSunHost)
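
# Hedged usage sketch (assumption: pysnmp is installed and this generated module
# sits on the MIB search path under the name SUN-T300-MIB). Loading the module
# and resolving one of the scalars exported above might look like this:
#
# from pysnmp.smi import builder
#
# mib_builder = builder.MibBuilder()
# mib_builder.loadModules('SUN-T300-MIB')
# (sys_id,) = mib_builder.importSymbols('SUN-T300-MIB', 'sysId')
# print(sys_id.getName())  # (1, 3, 6, 1, 4, 1, 42, 2, 28, 2, 2, 1, 1)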
|
nilq/baby-python
|
python
|
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import os
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from nlp_architect.models.temporal_convolutional_network import TCN, CommonLayers
class TCNForLM(TCN, CommonLayers):
"""
    Main class that defines the training graph and the training run method for language modeling
"""
def __init__(self, *args, **kwargs):
super(TCNForLM, self).__init__(*args, **kwargs)
self.num_words = None
self.input_placeholder_tokens = None
self.label_placeholder_tokens = None
self.learning_rate = None
self.input_embeddings = None
self.prediction = None
self.projection_out = None
self.gen_seq_prob = None
self.training_loss = None
self.validation_loss = None
self.test_loss = None
self.merged_summary_op_train = None
self.merged_summary_op_test = None
self.merged_summary_op_val = None
self.training_update_step = None
def run(self, data_loaders, lr, num_iterations=100, log_interval=100, result_dir="./",
ckpt=None):
"""
Args:
data_loaders: dict, keys are "train", "valid", "test",
values are corresponding iterator dataloaders
lr: float, learning rate
num_iterations: int, number of iterations to run
log_interval: int, number of iterations after which to run validation and log
result_dir: str, path to results directory
ckpt: str, location of checkpoint file
Returns:
None
"""
summary_writer = tf.summary.FileWriter(os.path.join(result_dir, "tfboard"),
tf.get_default_graph())
saver = tf.train.Saver(max_to_keep=None)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess.run(init)
if ckpt is not None:
saver.restore(sess, ckpt)
all_vloss = []
for i in range(num_iterations):
x_data, y_data = next(data_loaders["train"])
feed_dict = {self.input_placeholder_tokens: x_data,
self.label_placeholder_tokens: y_data, self.training_mode: True,
self.learning_rate: lr}
_, summary_train, total_loss_i = sess.run([self.training_update_step,
self.merged_summary_op_train,
self.training_loss],
feed_dict=feed_dict)
summary_writer.add_summary(summary_train, i)
if i % log_interval == 0:
print("Step {}: Total: {}".format(i, total_loss_i))
saver.save(sess, result_dir, global_step=i)
val_loss = {}
for split_type in ["valid", "test"]:
val_loss[split_type] = 0
data_loaders[split_type].reset()
count = 0
for x_data_test, y_data_test in data_loaders[split_type]:
feed_dict = {self.input_placeholder_tokens: x_data_test,
self.label_placeholder_tokens: y_data_test,
self.training_mode: False}
val_loss[split_type] += sess.run(self.training_loss, feed_dict=feed_dict)
count += 1
val_loss[split_type] = val_loss[split_type] / count
summary_val = sess.run(self.merged_summary_op_val,
feed_dict={self.validation_loss: val_loss["valid"]})
summary_test = sess.run(self.merged_summary_op_test,
feed_dict={self.test_loss: val_loss["test"]})
summary_writer.add_summary(summary_val, i)
summary_writer.add_summary(summary_test, i)
print("Validation loss: {}".format(val_loss["valid"]))
print("Test loss: {}".format(val_loss["test"]))
all_vloss.append(val_loss["valid"])
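                # Anneal: halve the learning rate when the current validation
                # loss is the worst of the last five validation checks.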
if i > 3 * log_interval and val_loss["valid"] >= max(all_vloss[-5:]):
lr = lr / 2.
def run_inference(self, ckpt, num_samples=10, sos=0, eos=1):
"""
Method for running inference for generating sequences
Args:
ckpt: Location of checkpoint file with trained model
num_samples: int, number of samples to generate
sos: int, start of sequence symbol
eos: int, end of sequence symbol
Returns:
List of sequences
"""
saver = tf.train.Saver(max_to_keep=None)
sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
if ckpt is not None:
saver.restore(sess, ckpt)
results = self.sample_sequence(sess, num_samples, sos=sos, eos=eos)
return results
def build_train_graph(self, num_words=20000, word_embeddings=None, max_gradient_norm=None,
em_dropout=0.4):
"""
Method that builds the graph for training
Args:
num_words: int, number of words in the vocabulary
word_embeddings: numpy array, optional numpy array to initialize embeddings
max_gradient_norm: float, maximum gradient norm value for clipping
em_dropout: float, dropout rate for embeddings
Returns:
None
"""
self.num_words = num_words
with tf.variable_scope("input", reuse=True):
self.input_placeholder_tokens = tf.placeholder(tf.int32, [None, self.max_len],
name='input_tokens')
self.label_placeholder_tokens = tf.placeholder(tf.int32, [None, self.max_len],
name='input_tokens_shifted')
self.learning_rate = tf.placeholder(tf.float32, shape=(), name='learning_rate')
self.input_embeddings = self.define_input_layer(self.input_placeholder_tokens,
word_embeddings,
embeddings_trainable=True)
input_embeddings_dropped = tf.layers.dropout(self.input_embeddings,
rate=em_dropout,
training=self.training_mode)
self.prediction = self.build_network_graph(input_embeddings_dropped,
last_timepoint=False)
if self.prediction.shape[-1] != self.n_features_in:
print("Not tying weights")
tied_weights = False
else:
print("Tying weights")
tied_weights = True
self.projection_out = self.define_projection_layer(self.prediction,
tied_weights=tied_weights)
self.gen_seq_prob = tf.nn.softmax(self.projection_out)
with tf.variable_scope("training"):
params = tf.trainable_variables()
soft_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=self.label_placeholder_tokens, logits=self.projection_out)
ce_last_tokens = tf.slice(soft_ce, [0, int(self.max_len / 2)],
[-1, int(self.max_len / 2)])
self.training_loss = tf.reduce_mean(ce_last_tokens)
summary_ops_train = [tf.summary.scalar("Training Loss", self.training_loss),
tf.summary.scalar("Training perplexity",
tf.exp(self.training_loss))]
self.merged_summary_op_train = tf.summary.merge(summary_ops_train)
self.validation_loss = tf.placeholder(tf.float32, shape=())
summary_ops_val = [tf.summary.scalar("Validation Loss", self.validation_loss),
tf.summary.scalar("Validation perplexity",
tf.exp(self.validation_loss))]
self.merged_summary_op_val = tf.summary.merge(summary_ops_val)
self.test_loss = tf.placeholder(tf.float32, shape=())
summary_ops_test = [tf.summary.scalar("Test Loss", self.test_loss),
tf.summary.scalar("Test perplexity", tf.exp(self.test_loss))]
self.merged_summary_op_test = tf.summary.merge(summary_ops_test)
# Calculate and clip gradients
gradients = tf.gradients(self.training_loss, params)
if max_gradient_norm is not None:
clipped_gradients, _ = tf.clip_by_global_norm(gradients, max_gradient_norm)
else:
clipped_gradients = gradients
grad_norm = tf.global_norm(clipped_gradients)
summary_ops_train.append(tf.summary.scalar("Grad Norm", grad_norm))
# Optimization
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
summary_ops_train.append(tf.summary.scalar("Learning rate", self.learning_rate))
self.merged_summary_op_train = tf.summary.merge(summary_ops_train)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
with tf.control_dependencies(update_ops):
self.training_update_step = optimizer.apply_gradients(zip(clipped_gradients,
params))
def sample_sequence(self, sess, num_samples=10, sos=0, eos=1):
"""
Method for sampling a sequence (repeatedly one symbol at a time)
Args:
sess: tensorflow session
num_samples: int, number of samples to generate
sos: int, start of sequence symbol
eos: int, end of sequence symbol
Returns:
List of sequences
"""
all_sequences = []
for _ in tqdm(range(num_samples)):
sampled_sequence = []
input_sequence = sos * np.ones((1, self.max_len))
count = 0
elem = sos
while (elem != eos) and (count <= self.max_len * 10):
feed_dict = {self.input_placeholder_tokens: input_sequence,
self.training_mode: False}
gen_seq_prob_value = sess.run(self.gen_seq_prob, feed_dict=feed_dict)
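                # Cast to float64 and renormalize so the probabilities sum to 1
                # exactly; np.random.multinomial rejects vectors whose sum
                # overshoots 1 due to float32 rounding.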
prob = gen_seq_prob_value[0, -1, :].astype(np.float64)
prob = prob / sum(prob)
elem = np.where(np.random.multinomial(1, prob))[0][0]
input_sequence = np.roll(input_sequence, -1, axis=-1)
input_sequence[:, -1] = elem
count += 1
sampled_sequence.append(elem)
all_sequences.append(sampled_sequence)
return all_sequences
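
# Hedged usage sketch. The constructor arguments come from the TCN base class in
# nlp_architect, which is not shown in this file, so every name below is
# illustrative rather than the library's confirmed signature; the data loaders
# are assumed to yield (x, y) batches of token ids shaped [batch, max_len].
#
# model = TCNForLM(...)  # base-class args (e.g. sequence length, channel sizes)
# model.build_train_graph(num_words=20000, max_gradient_norm=0.5, em_dropout=0.4)
# model.run(data_loaders={"train": train_iter, "valid": valid_iter, "test": test_iter},
#           lr=4.0, num_iterations=100000, log_interval=1000, result_dir="./results")
# samples = model.run_inference(ckpt="./results-99000", num_samples=5, sos=0, eos=1)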
|
nilq/baby-python
|
python
|
def foo():
    print("hello everybody")
|
nilq/baby-python
|
python
|
from relevanceai.base import _Base
from relevanceai.api.endpoints.centroids import CentroidsClient
class ClusterClient(_Base):
def __init__(self, project, api_key):
self.project = project
self.api_key = api_key
self.centroids = CentroidsClient(project=project, api_key=api_key)
super().__init__(project, api_key)
def aggregate(
self,
dataset_id: str,
vector_fields: list,
metrics: list = [],
groupby: list = [],
filters: list = [],
page_size: int = 20,
page: int = 1,
asc: bool = False,
flatten: bool = True,
alias: str = "default",
):
"""
        Takes an aggregation query and gets the aggregate of each cluster in a collection. This helps you interpret each cluster and what is in it.
        It can only be used after a vector field has been clustered. \n
For more information about aggregations check out services.aggregate.aggregate.
Parameters
----------
dataset_id : string
Unique name of dataset
vector_fields : list
The vector field that was clustered on
metrics: list
Fields and metrics you want to calculate
groupby: list
Fields you want to split the data into
filters: list
Query for filtering the search results
page_size: int
Size of each page of results
page: int
Page of the results
asc: bool
Whether to sort results by ascending or descending order
        flatten: bool
            Whether to flatten the aggregation results
alias: string
Alias used to name a vector field. Belongs in field_{alias}vector
"""
endpoint = "/services/cluster/aggregate"
method = "POST"
parameters = {
"dataset_id": dataset_id,
"aggregation_query": {"groupby": groupby, "metrics": metrics},
"filters": filters,
"page_size": page_size,
"page": page,
"asc": asc,
"flatten": flatten,
"vector_fields": vector_fields,
"alias": alias,
}
self._log_to_dashboard(
method=method,
parameters=parameters,
endpoint=endpoint,
dashboard_type="cluster_aggregation",
)
return self.make_http_request(
endpoint=endpoint, method=method, parameters=parameters
)
def facets(
self,
dataset_id: str,
facets_fields: list = [],
page_size: int = 20,
page: int = 1,
asc: bool = False,
date_interval: str = "monthly",
):
"""
Takes a high level aggregation of every field and every cluster in a collection. This helps you interpret each cluster and what is in them. \n
        It can only be used after a vector field has been clustered.
Parameters
----------
dataset_id : string
Unique name of dataset
facets_fields : list
Fields to include in the facets, if [] then all
page_size: int
Size of each page of results
page: int
Page of the results
asc: bool
Whether to sort results by ascending or descending order
date_interval: string
Interval for date facets
"""
return self.make_http_request(
endpoint="/services/cluster/facets",
method="GET",
parameters={
"dataset_id": dataset_id,
"facets_fields": facets_fields,
"page_size": page_size,
"page": page,
"asc": asc,
"date_interval": date_interval,
},
)
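# Usage sketch (hypothetical project, API key, dataset and field names; the
# client performs real HTTP requests, so this is illustrative only):
#
#     client = ClusterClient(project="my-project", api_key="my-api-key")
#     result = client.aggregate(
#         dataset_id="ecommerce",
#         vector_fields=["product_title_vector_"],
#         metrics=[{"name": "avg_price", "field": "price", "agg": "avg"}],
#     )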
|
nilq/baby-python
|
python
|
import requests
import urllib.error
from bs4 import BeautifulSoup
from os import path, makedirs
import wget
class Crawler:
"""
    Crawls pages with URLs like 'http(s)://page_path/page_name_{number}/' and downloads pictures.
"""
def __init__(self, url_pattern, page_number, css_alt=None):
self.url_pattern = url_pattern
self.page_number = page_number
self.image_urls = []
self.css_alt = css_alt
self.local_path = path.join(path.dirname(path.realpath(__file__)))
self.drop_folder = path.join(self.local_path, self.url_pattern.strip().split('/')[-3])
def get_images_url_list(self):
for num, image_url in enumerate(self.image_urls):
print("Number: {}\t Url: {}\n".format(num, image_url))
def images_urls(self, url_):
r = requests.get(url_)
soup = BeautifulSoup(r.content.decode(), "html.parser")
        if self.css_alt:
            imgs = soup.findAll("img", {"alt": self.css_alt})
        else:
            imgs = soup.findAll("img")
        for img in imgs:
            self.image_urls.append(img.get('src'))
def images(self, url_, drop_name):
if not path.isdir(self.drop_folder):
makedirs(self.drop_folder, mode=0o777, exist_ok=True)
drop_path = path.join(self.drop_folder, drop_name)
try:
wget.download(url_.strip(), drop_path)
except (ValueError, urllib.error.HTTPError) as e:
print("Can't get url {} on page {} because errors {}".format(url_, self.page_number, e))
pass
def main(self):
page_url = self.url_pattern.format(num=self.page_number)
self.images_urls(page_url)
self.get_images_url_list()
if int(self.page_number) < 10:
self.page_number = '0{}'.format(self.page_number)
for num, image_url in enumerate(self.image_urls):
drop_name = '{}.{}.jpg'.format(self.page_number, num)
self.images(image_url, drop_name)
if __name__ == '__main__':
    url_p = 'http://site_name_{num}/'
n = 'num'
print("Downloading from page {}\n".format(n))
crawler = Crawler(url_pattern=url_p, page_number=n)
crawler.main()
|
nilq/baby-python
|
python
|
# Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scripts.autoscale_sge import CloudProvider, CloudPipelineInstanceHelper
AZURE_DSV = "Dsv3"
AZURE_BMS = "Bms"
GCP_STANDARD = "standard"
GCP_HIGHCPU = "highcpu"
AWS_C5 = "c5"
AWS_P2 = "p2"
def test_aws_families():
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.aws(), "c5.xlarge")
assert family == AWS_C5
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.aws(), "p2.xlarge")
assert family == AWS_P2
def test_gcp_families():
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.gcp(), "n2-standard-2")
assert family == GCP_STANDARD
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.gcp(), "n2-highcpu-2")
assert family == GCP_HIGHCPU
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.gcp(), "custom-12-16")
assert family is None
def test_azure_families():
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.azure(), "Standard_B1ms")
assert family == AZURE_BMS
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.azure(), "Standard_D2s_v3")
assert family == AZURE_DSV
family = CloudPipelineInstanceHelper.get_family_from_type(CloudProvider.azure(), "Standard_D16s_v3")
assert family == AZURE_DSV
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# Allowed libraries: copy, math
# Importing any other library will be rejected by the evaluation service.
# The fact that some libraries are allowed does not mean they have to be used.
# IB002 Homework assignment 9.
#
# In this assignment we deal with binary search trees.
#
# In the first part, your task is to build an almost complete binary search
# tree containing the given keys. The input array of keys is ordered from the
# smallest to the largest. Your algorithm must have LINEAR time complexity
# with respect to the number of keys. This requirement is achievable because
# the input array is sorted.
#
# In the second part, your task is to determine whether a given binary search
# tree is almost complete. The required time complexity is linear in the
# number of nodes of the tree.
#
# In the third part, your task is to determine whether all leaves of a given
# binary search tree are at the same depth. The required time complexity is
# again linear in the number of nodes of the tree.
#
# An almost complete tree has all levels full; only the last level need not be
# completely filled (and also need not be aligned to the left).
#
# For illustration, for the input (1,2,3,4,5,6,7,8,9,10) a correct output of
# the algorithm from the first part is, for example, one of the following
# trees:
#
#             ( 5 )                       ( 7 )
#            /     \                     /     \
#         (2)       (8)             ( 4 )       ( 9 )
#        /   \     /   \           /     \     /     \
#      (1)   (3) (6)   (9)       (2)     (6) (8)     (10)
#               \   \     \     /   \    /
#               (4) (7)   (10) (1)  (3) (5)
# Do not modify the following class definitions in any way.
# To draw the trees you can use the provided make_graph function below.
class BSTree:
"""Trida BSTree pro reprezentaci binarniho vyhledavacicho stromu.
Atributy:
root koren stromu typu Node, nebo None, pokud je strom prazdny
"""
def __init__(self):
self.root = None
class Node:
"""Trida Node pro reprezentaci uzlu binarniho vyhledavaciho stromu.
Atributy:
data hodnota daneho uzlu (zadana pri inicializaci)
left odkaz na leveho potomka typu Node, nebo None, pokud neexistuje
right odkaz na praveho potomka typu Node, nebo None, pokud neexistuje
"""
def __init__(self, data):
self.left = None
self.right = None
self.data = data
# Task 1.
# Implement the function build_bst, which receives an ascending sorted list of
# values and builds an almost complete binary search tree (of type BSTree) from them.
def build_bst_rec(array, start, end):
""" Build almost complete tree. """
if start > end:
return None
mid = (start + end) // 2
node = Node(array[mid])
node.left = build_bst_rec(array, start, mid - 1)
node.right = build_bst_rec(array, mid + 1, end)
return node
def build_bst(array):
"""
    input:  'array' an ascending sorted array of values
    output: a tree of type BSTree that is almost complete (see above) and
            contains the values from array
    time complexity: O(n), where 'n' is the length of array
    auxiliary space complexity:
        O(1), not counting the input array or the output tree
"""
tree = BSTree()
tree.root = build_bst_rec(array, 0, len(array) - 1)
return tree
# Task 2.
# Implement the function check_almost_complete, which receives a binary search
# tree and tests whether it is almost complete.
def tree_height_n(node):
""" Return tree height. """
if node is None:
return -1
left = tree_height_n(node.left)
right = tree_height_n(node.right)
return max(left, right) + 1
def check_almost_complete_rec(node, depth, height):
""" Check if given tree is almost complete tree recursively. """
if depth >= height - 1:
return True
if node.left is None or node.right is None:
return False
return check_almost_complete_rec(node.left, depth + 1, height) \
and \
check_almost_complete_rec(node.right, depth + 1, height)
def check_almost_complete(tree):
"""
    input:  'tree' a binary search tree of type BSTree
    output: True if 'tree' is almost complete
            False otherwise
    time complexity: O(n), where 'n' is the number of nodes of the tree
    auxiliary space complexity: O(1) (not counting the input)
"""
if tree.root is None:
return True
height = tree_height_n(tree.root)
return check_almost_complete_rec(tree.root, 0, height)
# Task 3.
# Implement the function check_all_leaves_same_depth, which verifies whether
# all leaves of the given binary search tree are at the same depth.
class Storage:
def __init__(self):
self.level = None
def check_all_leaves_same_depth_rec(node, depth, storage):
if node is None:
return True
if node.left is None and node.right is None:
if storage.level is None:
storage.level = depth
return True
return depth == storage.level
return check_all_leaves_same_depth_rec(node.left, depth + 1, storage) \
and \
check_all_leaves_same_depth_rec(node.right, depth + 1, storage)
def check_all_leaves_same_depth(tree):
"""
    input:  'tree' a binary search tree of type BSTree
    output: True if all leaves of 'tree' are at the same depth
            False otherwise
    time complexity: O(n), where 'n' is the number of nodes of the tree
    auxiliary space complexity: O(1) (not counting the input)
"""
return check_all_leaves_same_depth_rec(tree.root, 0, Storage())
# The helper function make_graph generates a .dot file from the tree passed in
# the argument. The function only displays the current state of the given node
# and its children; it does not check in any way whether it is a BST.
#
# Either find a tool for the generated file yourself, or use these links:
# http://sandbox.kidstrythisathome.com/erdos/ or http://www.webgraphviz.com/
#
# Just copy the contents of the file into the form on the web page.
def make_graph(tree, filename="bst.dot"):
def dot_node(fd, node):
if node is None:
return
fd.write('{} [label="{}"]\n'.format(id(node), node.data))
for child, lr in (node.left, 'L'), (node.right, 'R'):
dot_node(fd, child)
dot_node_relations(fd, node, child, lr)
def dot_node_relations(fd, parent, node, direction):
if node is None:
nil = direction + str(id(parent))
fd.write('{} [label="",color=white]\n{} -> {}\n'
.format(nil, id(parent), nil))
else:
fd.write('{} -> {}\n'.format(id(parent), id(node)))
with open(filename, "w") as fd:
fd.write("digraph {\n")
fd.write("node [color=lightblue2,style=filled]\n")
dot_node(fd, tree.root)
fd.write("}\n")
##################################################################
# TESTS
##################################################################
bs_tree_0 = build_bst([0])
bs_tree_1 = build_bst([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
bs_tree_2 = build_bst([1, 1, 1, 1, 1, 2, 3, 3, 4, 5, 5, 5, 5, 6])
bs_tree_3 = BSTree()
node_0 = Node(0)
node_1 = Node(1)
node_2 = Node(2)
node_3 = Node(3)
node_4 = Node(4)
node_1.left = node_0
node_1.right = node_2
node_2.right = node_3
node_3.right = node_4
bs_tree_3.root = node_1
bs_tree_4 = BSTree()
node_1_1 = Node(1)
node_1_2 = Node(2)
node_1_3 = Node(3)
node_1_1.right = node_1_2
node_1_2.right = node_1_3
bs_tree_4.root = node_1_1
print(tree_height_n(bs_tree_0.root))
print(tree_height_n(bs_tree_1.root))
print(tree_height_n(bs_tree_2.root))
print(tree_height_n(bs_tree_3.root))
print(tree_height_n(bs_tree_4.root))
print("Check if binary tree is almost complete tree")
print(check_almost_complete(bs_tree_0)) # true
print(check_almost_complete(bs_tree_1)) # true
print(check_almost_complete(bs_tree_2)) # true
print(check_almost_complete(bs_tree_3)) # false
print(check_almost_complete(bs_tree_4)) # false
print("Check if all leaves of binary tree have same depth")
print(check_all_leaves_same_depth(bs_tree_0)) # true
print(check_all_leaves_same_depth(bs_tree_1)) # false
print(check_all_leaves_same_depth(bs_tree_2)) # true
print(check_all_leaves_same_depth(bs_tree_3)) # false
print(check_all_leaves_same_depth(bs_tree_4)) # true
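# To inspect any of the trees above visually, render it to a .dot file, e.g.:
# make_graph(bs_tree_1, "bst1.dot")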
|
nilq/baby-python
|
python
|
class Item:
def __init__(self, name, tag, desc, intro):
self.name = name
self.tag = tag
self.desc = desc
self.intro = intro
def __str__(self):
return f"=> {self.name} - {self.desc}"
def getItem(self, player):
player.inventory.append(self)
def getIntro(self):
return self.intro
# With this setup, item subclasses pass keyword arguments to the base
# constructor; only `intro` is passed in positionally as the first argument.
class Gum(Item):
def __init__(self, intro):
super().__init__(name="Gum",
tag="gum",
desc="a single stick of gum.",
intro=intro)
class Screwdriver(Item):
def __init__(self, intro = "It's a screwdriver"):
super().__init__(name="Screwdriver",
tag="screwdriver",
desc="this could come in handy",
intro=intro)
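# Usage sketch: constructing an item and handing it to a (hypothetical) player
# object that exposes an `inventory` list.
#
#     gum = Gum(intro="You notice a stick of gum on the table.")
#     print(gum)           # => Gum - a single stick of gum.
#     gum.getItem(player)  # appends the item to player.inventory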
|
nilq/baby-python
|
python
|
# pylint: skip-file
|
nilq/baby-python
|
python
|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.task_queue."""
import tensorflow as tf
from tfx.orchestration.experimental.core import task as task_lib
from tfx.orchestration.experimental.core import task_queue
from tfx.orchestration.experimental.core import test_utils
from tfx.utils import test_case_utils as tu
def _test_task(node_id, pipeline_id):
node_uid = task_lib.NodeUid(
pipeline_uid=task_lib.PipelineUid(pipeline_id=pipeline_id),
node_id=node_id)
return test_utils.create_exec_node_task(node_uid)
class TaskQueueTest(tu.TfxTest):
def test_task_queue_operations(self):
t1 = _test_task(node_id='trainer', pipeline_id='my_pipeline')
t2 = _test_task(node_id='transform', pipeline_id='my_pipeline')
tq = task_queue.TaskQueue()
# Enqueueing new tasks is successful.
self.assertTrue(tq.enqueue(t1))
self.assertTrue(tq.enqueue(t2))
# Re-enqueueing the same tasks fails.
self.assertFalse(tq.enqueue(t1))
self.assertFalse(tq.enqueue(t2))
# Dequeue succeeds and returns `None` when queue is empty.
self.assertEqual(t1, tq.dequeue())
self.assertEqual(t2, tq.dequeue())
self.assertIsNone(tq.dequeue())
self.assertIsNone(tq.dequeue(0.1))
# Re-enqueueing the same tasks fails as `task_done` has not been called.
self.assertFalse(tq.enqueue(t1))
self.assertFalse(tq.enqueue(t2))
tq.task_done(t1)
tq.task_done(t2)
# Re-enqueueing is allowed after `task_done` has been called.
self.assertTrue(tq.enqueue(t1))
self.assertTrue(tq.enqueue(t2))
def test_invalid_task_done_raises_errors(self):
t1 = _test_task(node_id='trainer', pipeline_id='my_pipeline')
t2 = _test_task(node_id='transform', pipeline_id='my_pipeline')
tq = task_queue.TaskQueue()
# Enqueue t1, but calling `task_done` raises error since t1 is not dequeued.
self.assertTrue(tq.enqueue(t1))
with self.assertRaisesRegex(RuntimeError, 'Must call `dequeue`'):
tq.task_done(t1)
# `task_done` succeeds after dequeueing.
self.assertEqual(t1, tq.dequeue())
tq.task_done(t1)
# Error since t2 is not in the queue.
with self.assertRaisesRegex(RuntimeError, 'Task not present'):
tq.task_done(t2)
if __name__ == '__main__':
tf.test.main()
|
nilq/baby-python
|
python
|
import logging
def get_logger(log_filename=None, module_name=__name__, level=logging.INFO):
# select handler
if log_filename is None:
handler = logging.StreamHandler()
elif type(log_filename) is str:
handler = logging.FileHandler(log_filename, 'w')
else:
raise ValueError("log_filename invalid!")
# build logger
logger = logging.getLogger(module_name)
logger.setLevel(level)
handler.setLevel(level)
    formatter = logging.Formatter('%(asctime)s %(filename)s '
                                  '[line:%(lineno)d] %(levelname)s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
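# Usage sketch: log to stderr by default, or pass a filename to log to a file.
#
#     logger = get_logger()                # StreamHandler to stderr
#     file_logger = get_logger("run.log")  # FileHandler, truncates the file
#     logger.info("pipeline started")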
def serialize_tree_level(tree):
level_dic = {}
def dfs(u, dep = 0):
if dep not in level_dic:
level_dic[dep] = []
s = "id: %s, child: " % tree[u].id
for i in tree[u].childst:
s += str(i) + ", "
s = s[: -2]
s += "\n"
level_dic[dep].append(s)
for i in tree[u].childst:
dfs(i, dep + 1)
dfs(len(tree) - 1)
s = ""
for i in level_dic:
s += "level %d: \n" % i
for j in level_dic[i]:
s += j
s += "\n"
return s
|
nilq/baby-python
|
python
|
from view import View
from tkinter import Tk
class Controller:
def __init__(self, model):
self.model = model
self.view = View(self.model.graph.width(),
self.model.graph.height(),
self.model.graph_path)
def run(self):
self.view.draw_model(self.model)
self.view.root.mainloop()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- Mode: Python -*-
# -*- coding: ascii -*-
"""
Dump the list of layer names for layers that contain mesh data.
"""
import lwsdk
__lwver__ = "11"
class HistoryData():
def __init__(self):
self.string = ''
self.select_contains = False
self.select_others = False
class DumpLayerNameCM(lwsdk.ICommandSequence):
def __init__(self, context):
super(DumpLayerNameCM, self).__init__()
def selectLayers(self, data):
obj_funcs = lwsdk.LWObjectFuncs()
state_query = lwsdk.LWStateQueryFuncs()
obj_name = state_query.object()
layer_list = state_query.layerList(lwsdk.OPLYR_NONEMPTY, obj_name)
# there is no mesh !
if layer_list == '':
message_funcs = lwsdk.LWMessageFuncs()
message_funcs.error('No mesh data', '')
return lwsdk.AFUNC_OK
current_obj = obj_funcs.focusObject()
layers = layer_list.split(' ')
foreground_layers = []
background_layers = []
for layer in layers:
layer_int = int(layer) - 1
            # layerName returns None for unnamed layers; treat that as an empty name
            layer_name = obj_funcs.layerName(current_obj, layer_int)
            if layer_name is None:
                layer_name = ''
            if data.select_contains == (data.string in layer_name):
foreground_layers.append(layer)
else:
background_layers.append(layer)
print('foreground_layers')
print(foreground_layers)
print('background_layers')
print(background_layers)
def process(self, mod_command):
        data = HistoryData()
data.string = "aaa"
data.select_contains = True
data.select_others = False
self.selectLayers(data)
return lwsdk.AFUNC_OK
ServerTagInfo = [
("LW_DumpLayerNameCM", lwsdk.SRVTAG_USERNAME | lwsdk.LANGID_USENGLISH),
("LW_DumpLayerNameCM", lwsdk.SRVTAG_BUTTONNAME | lwsdk.LANGID_USENGLISH),
("Utilities/LW_DumpLayerNameCM", lwsdk.SRVTAG_MENU | lwsdk.LANGID_USENGLISH)
]
ServerRecord = {lwsdk.CommandSequenceFactory(
"LW_DumpLayerNameCM", DumpLayerNameCM): ServerTagInfo}
|
nilq/baby-python
|
python
|
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import os
import unittest
from unittest.mock import Mock, call, patch
from ci_workflow.ci_check_manifest_component import CiCheckManifestComponent
from ci_workflow.ci_target import CiTarget
from manifests.build_manifest import BuildManifest
from manifests.input_manifest import InputComponentFromDist
class TestCiCheckManifestComponent(unittest.TestCase):
DATA = os.path.join(os.path.dirname(__file__), "data")
BUILD_MANIFEST = os.path.join(DATA, "opensearch-1.1.0-x64-build-manifest.yml")
@patch("manifests.distribution.find_build_root")
@patch("ci_workflow.ci_check_manifest_component.BuildManifest")
def test_retrieves_manifests(self, mock_manifest: Mock, find_build_root: Mock):
find_build_root.return_value = 'url/linux/ARCH/builds/opensearch'
check = CiCheckManifestComponent(InputComponentFromDist({
"name": "common-utils",
"dist": "url"
}), CiTarget(version="1.1.0", name="opensearch", snapshot=True))
mock_manifest.from_url.return_value = BuildManifest.from_path(self.BUILD_MANIFEST)
check.check()
mock_manifest.from_url.assert_has_calls([
call("url/linux/ARCH/builds/opensearch/manifest.yml"),
call("url/linux/ARCH/builds/opensearch/manifest.yml"),
])
find_build_root.assert_has_calls([
call('url', 'linux', 'x64', 'opensearch'),
call('url', 'linux', 'arm64', 'opensearch'),
])
@patch("manifests.distribution.find_build_root")
@patch("ci_workflow.ci_check_manifest_component.BuildManifest")
def test_missing_component(self, mock_manifest: Mock, find_build_root: Mock):
find_build_root.return_value = 'url/linux/x64/builds/opensearch'
check = CiCheckManifestComponent(InputComponentFromDist({
"name": "does-not-exist",
"dist": "url"
}), CiTarget(version="1.1.0", name="opensearch", snapshot=True))
mock_manifest.from_url.return_value = BuildManifest.from_path(self.BUILD_MANIFEST)
with self.assertRaises(CiCheckManifestComponent.MissingComponentError) as ctx:
check.check()
self.assertEqual(str(ctx.exception), "Missing does-not-exist in url/linux/x64/builds/opensearch/manifest.yml.")
find_build_root.assert_called()
|
nilq/baby-python
|
python
|
from plugins.adversary.app.operation.operation import Step, OPVar, OPHost, OPRat, OPSoftware
from plugins.adversary.app.commands import *
from plugins.adversary.app.custom import *
class WebServerInstall(Step):
""" Description:
This step prepares the installation of a PHP webserver.
Requirements:
This step only requires the existence of a RAT on a host in order to run.
"""
display_name = 'webserver_install'
summary = 'Prepares webserver installation'
attack_mapping = [('T1094', 'Command and Control')]
preconditions = [('rat', OPRat({'elevated': True })),
('host', OPHost(OPVar('rat.host')))]
postconditions = [('software_g', OPSoftware({'name': 'webserver', 'installed': False, 'downloaded': False}))]
significant_parameters = ['host']
@staticmethod
def description(host):
return 'Preparing webserver install on {}'.format(host.fqdn)
@staticmethod
async def action(operation, rat, host, software_g):
name = 'webserver'
download_url = 'http://www.usbwebserver.net/downloads/USBWebserver%20v8.6.zip'
download_loc = (get_temp_folder(host, rat) + '{}.zip'.format(random_string()))
install_loc = (get_temp_folder(host, rat) + '{}\\'.format(random_string()))
install_command = {
'process': 'powershell.exe',
'args': '/command "Add-Type -A System.IO.Compression.FileSystem; [IO.Compression.ZipFile]::ExtractToDirectory(\'{}\', \'{}\')"'.format(download_loc, install_loc),
}
(await software_g({
'host': host,
'name': name,
'installed': False,
'install_command': install_command,
'install_loc': install_loc,
'downloaded': False,
'download_url': download_url,
'download_loc': download_loc,
}))
return True
@staticmethod
async def cleanup(cleaner, host, software_g):
for software in software_g:
if (not (await cleaner.run_on_agent(host, command.CommandLine('rmdir /s /q {}'.format(software.install_loc)), (lambda x: (x.strip() == ''))))):
(await cleaner.console_log(host, "Can't delete webserver folder on {} ({})".format(host.fqdn, software.install_loc)))
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
from django.db.models.signals import post_save, post_delete
from django.conf import settings
class SyncConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'sync'
def ready(self):
try:
from .signals import init_signals
init_signals()
print("Custom Signals Initialised")
except ImportError:
print("No Custom Signals")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Launch small HTTP server for TimeoutTest test case
Should work with Python 2 and 3.
"""
import sys
import time
try:
from SimpleHTTPServer import SimpleHTTPRequestHandler as RequestHandler
except ImportError:
from http.server import CGIHTTPRequestHandler as RequestHandler
try:
from SocketServer import TCPServer as HTTPServer
except ImportError:
from http.server import HTTPServer
PYTHON_VERSION = sys.version_info[0]
class Handler(RequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-type", "text/xml")
self.end_headers()
response_string = """
<?xml version="1.0" encoding="utf-8" ?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xsd="http://www.w3.org/2001/XMLSchema">
<soap:Header>
<t:ServerVersionInfo MajorVersion="8" MinorVersion="0" MajorBuildNumber="685" MinorBuildNumber="8"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types" />
</soap:Header>
<soap:Body>
<BogusResponse xmlns:m="http://schemas.microsoft.com/exchange/services/2006/messages"
xmlns:t="http://schemas.microsoft.com/exchange/services/2006/types"
xmlns="http://schemas.microsoft.com/exchange/services/2006/messages">
<m:ResponseMessages>
<m:BogusResponseMessage ResponseClass="Success">
<m:ResponseCode>NoError</m:ResponseCode>
</m:BogusResponseMessage>
</m:ResponseMessages>
</BogusResponse>
</soap:Body>
</soap:Envelope>
"""
        if PYTHON_VERSION == 3:
response = bytes(response_string, "utf-8")
else:
response = response_string
self.wfile.write(response)
def do_POST(self):
self.do_GET()
def log_message(self, format, *args):
return
server = HTTPServer(("localhost", 8080), Handler)
server.serve_forever()
|
nilq/baby-python
|
python
|
"""
PPO with tensorflow implementation
The goal of RL is to find an optimal behavior strategy for the agent to obtain
optimal rewards. The policy gradient methods target at modeling and optimizing
the policy directly. The policy loss is defined as
    L = E[ log pi(a|s) * A ]
where 'L' is the policy loss, 'E' is the expectation, 'log pi(a|s)' is the log
probability of taking action 'a' in state 's', and 'A' is the advantage estimate.
PPO is an on-policy algorithm which can be used for environments with either discrete
or continuous action spaces. There are two primary variants of PPO: PPO-penalty, which
approximately solves a KL-constrained update like TRPO but penalizes the KL-divergence
in the objective function instead of making it a hard constraint; and PPO-clip, which
has neither a KL-divergence term in the objective nor an explicit constraint, relying
instead on specialized clipping in the objective function to remove incentives for the
new policy to move far from the old policy. This implementation uses PPO-clip; a
standalone sketch of the clipped objective follows the imports below.
PPO is a policy gradient method and can be used for environments with either discrete
or continuous action spaces. It trains a stochastic policy in an on-policy way. Also,
it utilizes the actor critic method. The actor maps the observation to an action and
the critic gives an expectation of the rewards of the agent for the observation given.
Firstly, it collects a set of trajectories for each epoch by sampling from the latest
version of the stochastic policy. Then, the rewards-to-go and the advantage estimates
are computed in order to update the policy and fit the value function. The policy is
updated via a stochastic gradient ascent optimizer, while the value function is fitted
via some gradient descent algorithm. This procedure is applied for many epochs until
the environment is solved.
references:
[1] https://arxiv.org/pdf/1707.06347.pdf
[2] https://spinningup.openai.com/en/latest/algorithms/ppo.html
[3] https://keras.io/examples/rl/ppo_cartpole/
"""
import numpy as np
import tensorflow as tf
import gym
import scipy.signal
import datetime
import argparse
import tensorflow.keras.backend as K
from gym import wrappers
import os
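# Standalone sketch of the PPO-clip surrogate objective described in the module
# docstring. It is illustrative only and is not used by the agent below, which
# implements the same idea with TensorFlow tensors in ActorModel.train_policy.
def ppo_clip_objective(ratio, advantage, eps=0.2):
    """Clipped surrogate loss for NumPy arrays of probability ratios and advantages.

    `ratio` is pi_new(a|s) / pi_old(a|s); `advantage` is the advantage estimate;
    `eps` is the clip range (0.2 in this implementation).
    """
    clipped = np.clip(ratio, 1.0 - eps, 1.0 + eps)
    # negate because optimizers minimize: maximizing the clipped objective
    # equals minimizing its negative
    return -np.mean(np.minimum(ratio * advantage, clipped * advantage))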
"""
Replay buffer: stores experiences and computes returns and advantage estimates;
its contents are used to update the policy.
"""
class ReplayBuffer:
def __init__(self, obs_dim, size, gamma=0.99, lamda=0.95):
self.obs_buf = np.zeros((size, obs_dim), dtype=np.float32) # states
        self.act_buf = np.zeros(size, dtype=np.int32)  # action, sampled from the stochastic policy
self.rew_buf = np.zeros(size, dtype=np.float32) # step reward
self.ret_buf = np.zeros(size, dtype=np.float32) # ep_return, total reward of episode
        self.val_buf = np.zeros(size, dtype=np.float32)  # state value V(s), output of critic net
        self.adv_buf = np.zeros(size, dtype=np.float32)  # advantage A(s,a) = Q(s,a) - V(s)
        self.logprob_buf = np.zeros(size, dtype=np.float32)  # log-probability of the taken action, from the actor net
self.gamma, self.lamda = gamma, lamda
self.ptr, self.idx = 0, 0 # buffer ptr, and current trajectory start index
def store(self, observation, action, reward, value, logprob):
#print("storing", state[0].shape, action.shape, reward, prediction.shape, value.shape)
self.obs_buf[self.ptr]=observation
self.act_buf[self.ptr]=action
self.rew_buf[self.ptr]=reward
self.val_buf[self.ptr]=value
self.logprob_buf[self.ptr]=logprob
self.ptr += 1
"""
    For each episode, compute the discounted return and the advantage estimates.
"""
def ep_update(self, lastValue = 0):
"""
magic from rllab for computing discounted cumulative sums of vectors
input: vector x: [x0, x1, x2]
output: [x0+discount*x1+discount^2*x2, x1+discount*x2, x2]
"""
def discount_cumsum(x,discount):
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
ep_slice = slice(self.idx, self.ptr)
rews = np.append(self.rew_buf[ep_slice], lastValue)
vals = np.append(self.val_buf[ep_slice], lastValue)
deltas = rews[:-1]+self.gamma*vals[1:]-vals[:-1]
        # Generalized Advantage Estimation (GAE)
self.adv_buf[ep_slice] = discount_cumsum(deltas, self.gamma*self.lamda)
        # rewards-to-go, which are the targets for the value function
self.ret_buf[ep_slice] = discount_cumsum(rews, self.gamma)[:-1]
self.idx = self.ptr
def get(self):
# get all data of the buffer and normalize the advantages
self.ptr, self.idx = 0, 0
adv_mean, adv_std = np.mean(self.adv_buf), np.std(self.adv_buf)
self.adv_buf = (self.adv_buf-adv_mean)/adv_std
return dict(
states=self.obs_buf,
actions=self.act_buf,
advantages=self.adv_buf,
returns=self.ret_buf,
logprobs=self.logprob_buf,
)
"""
Loss-printing callback.
"""
class PrintLoss(tf.keras.callbacks.Callback):
def on_epoch_end(self,epoch,logs={}):
print("epoch index", epoch+1, "loss", logs.get('loss'))
"""
build a feedforward neural network
"""
def mlp(obsDim, hiddenSize, numActions, outputActivation=None):
inputs = tf.keras.Input(shape=(obsDim,), dtype=tf.float32)
x = tf.keras.layers.Dense(units=hiddenSize[0], activation='tanh')(inputs)
for i in range(1, len(hiddenSize)):
x = tf.keras.layers.Dense(units=hiddenSize[i], activation='tanh')(x)
logits = tf.keras.layers.Dense(units=numActions, activation=outputActivation)(x)
return tf.keras.Model(inputs = inputs, outputs=logits)
def logprobabilities(logits, action, numActions):
logprob_all = tf.nn.log_softmax(logits)
logprob = tf.reduce_sum(tf.one_hot(action, numActions)*logprob_all, axis=1)
return logprob
"""
Actor net
"""
class ActorModel:
def __init__(self, obsDim, hiddenSize, numActions, clipRatio, lr):
self.policyNN = self.build_model(obsDim, hiddenSize, numActions, lr)
self.clipRatio = clipRatio
self.numActions = numActions
self.lossPrinter = PrintLoss()
self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
def build_model(self, obsDim, hiddenSize, numActions, lr):
model = mlp(obsDim, hiddenSize, numActions)
# model.compile(loss=self.ppo_loss, optimizer=tf.keras.optimizers.Adam(learning_rate=lr))
# print(model.summary())
return model
# def ppo_loss(self, y_true, y_pred):
# # y_true: np.hstack([advantages, predictions, actions])
# advs,o_pred,acts = y_true[:,:1],y_true[:,1:1+self.numActions],y_true[:,1+self.numActions:]
# # print(y_pred, advs, picks, acts)
# prob = y_pred*acts
# old_prob = o_pred*acts
# ratio = prob/(old_prob + 1e-10)
# p1 = ratio*advs
# p2 = K.clip(ratio, 1-self.clipRatio, 1+self.clipRatio)*advs
# # total loss = policy loss + entropy loss (entropy loss for promote action diversity)
# loss = -K.mean(K.minimum(p1,p2)+self.beta*(-y_pred*K.log(y_pred+1e-10)))
# return loss
# def fit(self,states,y_true,epochs,batch_size):
# self.actor.fit(states, y_true, epochs=epochs, verbose=0, shuffle=True, batch_size=batch_size, callbacks=[self.lossPrinter])
def predict(self, obs):
obs = obs.reshape(1,-1)
logits = self.policyNN(obs)
action = tf.squeeze(tf.random.categorical(logits, 1),axis=1)
return logits, action
@tf.function
def train_policy(self, obs_buf, act_buf, logprob_buf, adv_buf):
        # Record operations for automatic differentiation
with tf.GradientTape() as tape:
logits = self.policyNN(obs_buf)
ratio = tf.exp(logprobabilities(logits, act_buf, self.numActions)-logprob_buf)
minAdv = tf.where(adv_buf > 0, (1+self.clipRatio)*adv_buf, (1-self.clipRatio)*adv_buf)
policyLoss = -tf.reduce_mean(tf.minimum(ratio*adv_buf, minAdv))
policyGrads = tape.gradient(policyLoss, self.policyNN.trainable_variables)
self.optimizer.apply_gradients(zip(policyGrads, self.policyNN.trainable_variables))
k1 = tf.reduce_mean(logprob_buf - logprobabilities(self.policyNN(obs_buf), act_buf, self.numActions))
k1 = tf.reduce_sum(k1)
return k1
"""
Critic net
"""
class CriticModel:
def __init__(self, obsDim, hiddenSize, lr):
self.valueNN = self.build_model(obsDim, hiddenSize, lr)
self.lossPrinter = PrintLoss()
self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
def build_model(self, obsDim, hiddenSize, lr):
model = mlp(obsDim, hiddenSize, 1)
# model.compile(loss="mse",optimizer=tf.keras.optimizers.Adam(learning_rate=lr))
# print(model.summary())
return model
def predict(self,obs):
obs = obs.reshape(1,-1)
digits = self.valueNN(obs)
value = tf.squeeze(digits, axis=1)
return value
# def fit(self,states,y_true,epochs,batch_size):
# self.critic.fit(states, y_true, epochs=epochs, verbose=0, shuffle=True, batch_size=batch_size, callbacks=[self.lossPrinter])
@tf.function
def train_value(self, obs_buf, ret_buf):
# Record operations for automatic differentiation
with tf.GradientTape() as tape:
valueLoss = tf.reduce_mean((ret_buf - self.valueNN(obs_buf)) ** 2)
valueGrads = tape.gradient(valueLoss, self.valueNN.trainable_variables)
self.optimizer.apply_gradients(zip(valueGrads, self.valueNN.trainable_variables))
"""
PPO Agent
"""
class PPOAgent:
def __init__(self, obsDim, hiddenSize, numActions, clipRatio, policyLR, valueLR, memorySize, gamma, lamda, targetK1):
self.buffer = ReplayBuffer(obsDim, memorySize, gamma, lamda)
self.Actor = ActorModel(obsDim, hiddenSize, numActions, clipRatio, policyLR)
self.Critic = CriticModel(obsDim, hiddenSize, valueLR)
self.actDim = numActions
self.targetK1 = targetK1
def action(self, obs):
# sample action from actor
logits, action = self.Actor.predict(obs)
        # get the log-probability of the taken action from the logits
logprob = logprobabilities(logits, action, self.actDim)
# get value
value = self.Critic.predict(obs)
return logprob, action, value
def train(self, itActor=80, itCritic=80):
data = self.buffer.get()
obs_buf = data['states']
act_buf = data['actions']
adv_buf = data['advantages']
ret_buf = data['returns']
logprob_buf = data['logprobs']
        # train policy network
for _ in range(itActor):
k1 = self.Actor.train_policy(obs_buf, act_buf, logprob_buf, adv_buf)
if k1 > 1.5 * self.targetK1:
break # Early Stopping
# train value network
for _ in range(itCritic):
self.Critic.train_value(obs_buf, ret_buf)
#######
np.random.seed(123)
def make_video(env, agent):
env = wrappers.Monitor(env,os.path.join(os.getcwd(),"videos"), force=True)
rewards = 0
steps = 0
done = False
obs = env.reset()
while not done:
env.render()
logprob, action, value = agent.action(obs)
obs, reward, done, _ = env.step(action[0].numpy())
steps += 1
rewards += reward
if done:
env.reset()
print("Test Step {} Rewards {}".format(steps, rewards))
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--max_ep', type=int, default=10000)
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
maxEpoch = args.max_ep
epSteps = 4000
gamma = 0.99
lamda = 0.97
clipRatio = 0.2
policyLearningRate = 3e-4
valueLearningRate = 1e-3
policyTrainingIteration = 80
valueTrainingIteration = 80
targetK1 = 0.01
currTime = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logDir = 'logs/ppo' + currTime
summaryWriter = tf.summary.create_file_writer(logDir)
env = gym.make('CartPole-v0')
obsDim = env.observation_space.shape[0]
numActions = env.action_space.n
hiddenSize = [64,64]
agent = PPOAgent(obsDim,hiddenSize,numActions,clipRatio,policyLearningRate,valueLearningRate,epSteps,gamma,lamda,targetK1)
obs, epReturn, epLength = env.reset(), 0, 0
# Iteration over the number of epochs
for ep in range(maxEpoch):
sumReturn = 0
sumLength = 0
numEpisodes = 0
# Iterate over the steps of each epoch
for t in range(epSteps):
logprob, action, value = agent.action(obs)
newobs, reward, done, _ = env.step(action[0].numpy())
epReturn += reward
epLength += 1
agent.buffer.store(obs, action, reward, value, logprob)
obs = newobs
# finish trajectory if reach to a terminal state
if done or (t == epSteps-1):
lastValue = 0 if done else agent.Critic.predict(obs)
agent.buffer.ep_update(lastValue)
sumReturn += epReturn
sumLength += epLength
numEpisodes += 1
with summaryWriter.as_default():
tf.summary.scalar('episode reward', epReturn, step=numEpisodes)
obs, epReturn, epLength = env.reset(), 0, 0
# update policy and value function
agent.train(policyTrainingIteration, valueTrainingIteration)
print("Episode: {} Average Rewards: {:.4f} Mean Length {:.4f} ".format(ep+1, sumReturn/numEpisodes, sumLength/numEpisodes))
make_video(env, agent)
env.close()
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import pwd
import grp
import errno
import config
import subprocess
import simplegist
import unicodedata
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from tornado.options import options
from jinja2 import Environment, FileSystemLoader
import tornado.web
api_logger = config.getlog()
class BaseHandler(tornado.web.RequestHandler):
"""
Base Class used on every Handler
"""
def checkMaven(self):
pass
class execCommand(object):
def __init__(self, cmdlaunch):
self.cmdlaunch = cmdlaunch
def execute(self):
launch = subprocess.Popen(self.cmdlaunch, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = launch.communicate()
return output, err
class Utils(object):
def lastlines(self, hugefile, n, bsize=2048):
# get newlines type, open in universal mode to find it
with open(hugefile, 'rU') as hfile:
if not hfile.readline():
return # empty, no point
sep = hfile.newlines # After reading a line, python gives us this
assert isinstance(sep, str), 'multiple newline types found, aborting'
# find a suitable seek position in binary mode
with open(hugefile, 'rb') as hfile:
hfile.seek(0, os.SEEK_END)
linecount = 0
pos = 0
while linecount <= n + 1:
# read at least n lines + 1 more; we need to skip a partial line later on
try:
hfile.seek(-bsize, os.SEEK_CUR) # go backwards
linecount += hfile.read(bsize).count(sep) # count newlines
hfile.seek(-bsize, os.SEEK_CUR) # go back again
except IOError as e:
if e.errno == errno.EINVAL:
# Attempted to seek past the start, can't go further
bsize = hfile.tell()
hfile.seek(0, os.SEEK_SET)
linecount += hfile.read(bsize).count(sep)
break
raise # Some other I/O exception, re-raise
pos = hfile.tell()
# Re-open in text mode
with open(hugefile, 'r') as hfile:
hfile.seek(pos, os.SEEK_SET) # our file position from above
for line in hfile:
# We've located n lines *or more*, so skip if needed
if linecount > n:
linecount -= 1
continue
# The rest we yield
yield line
def checkAndcreate(self, dir, user, group):
if not os.path.exists(dir):
os.makedirs(dir)
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(dir, uid, gid)
return 1
return 0
def changeOwner(self, filePath, user, group):
if os.path.exists(filePath):
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(group).gr_gid
os.chown(filePath, uid, gid)
return 1
return 0
def write_module(self, module_name, module_lang, source_code, dst_path, module_type):
"""Gets the source code of a module from a GitHub gist.
Args:
module_name: The name of the module.
module_lang: Code language.
source_code: Gist url.
dst_path: Absolute path for module on file sytem.
Returns:
The file system path of the newly created module.
Raises:
IOError: An error occurred accessing GitHub or creating the source files.
"""
print(type(source_code))
api_logger.info("Module name: " + str(module_name))
api_logger.info("Module lang: " + str(module_lang))
# api_logger.info("Source code: "+str(source_code))
api_logger.info("DST_PATH: " + str(dst_path))
api_logger.info("MODULE Type: " + str(module_type))
if module_lang == "py":
file_name = os.path.join(dst_path, module_name.lower() + "." + module_lang)
elif module_lang == "java":
file_name = os.path.join(dst_path, module_name + "." + module_lang)
        # Write the source code into the target file
try:
with open(file_name, "w") as text_file:
text_file.write(unicodedata.normalize('NFKD', source_code).encode('ascii', 'ignore'))
self.changeOwner(file_name, "storm", "storm")
except Exception as e:
print(str(e))
api_logger.error(str(e))
raise e
if module_lang == "py":
# Time to jinja2
# Check module type
if module_type == "drain":
boltType = "drains"
dst_path = options.backend_java_path_drains
template_name = options.backend_template_path + "boltjava2python.tmpl"
elif module_type == "bolt":
boltType = "bolts"
dst_path = options.backend_java_path_bolts
template_name = options.backend_template_path + "boltjava2python.tmpl"
elif module_type == "spout":
boltType = "spouts"
dst_path = options.backend_java_path_spouts
template_name = options.backend_template_path + "spoutjava2python.tmpl"
env = Environment(loader=FileSystemLoader('/'))
template = env.get_template(template_name)
file_name = os.path.join(dst_path, module_name + ".java")
try:
with open(file_name, "w") as text_file:
text_file.write(
template.render(boltName=module_name, boltType=boltType, boltNamelowercase=module_name.lower()))
self.changeOwner(file_name, "storm", "storm")
except Exception as e:
api_logger.error(str(e))
raise e
return file_name
def get_module(self, module_name, module_lang, gist_url, dst_path, module_type):
"""Gets the source code of a module from a GitHub gist.
Args:
module_name: The name of the module.
module_lang: Code language.
gist_url: Gist url.
dst_path: Absolute path for module on file sytem.
Returns:
The file system path of the newly created module.
Raises:
IOError: An error occurred accessing GitHub or creating the source files.
"""
# Start gist handler
API_TOKEN = options.gist_api_token
USERNAME = options.gist_username
GHgist = simplegist.Simplegist(username=USERNAME, api_token=API_TOKEN)
api_logger.info("Module name: " + str(module_name))
api_logger.info("Module lang: " + str(module_lang))
api_logger.info("Gist URL: " + str(gist_url))
api_logger.info("DST_PATH: " + str(dst_path))
api_logger.info("MODULE Type: " + str(module_type))
# Get Id and user from URL
gist_id_reg = re.compile('([a-zA-Z0-9]+)')
gist_user, gist_id = gist_id_reg.findall(urlparse(gist_url).path)
api_logger.info("Gist USER: " + str(gist_user))
api_logger.info("Gist ID: " + str(gist_id))
# Download code from GIST
GHgist.profile().getgist(id=gist_id)
# Authenticate using a GitHub API access token.
if module_lang == "py":
file_name = os.path.join(dst_path, module_name.lower() + "." + module_lang)
elif module_lang == "java":
file_name = os.path.join(dst_path, module_name + "." + module_lang)
else:
file_name = None
        # Write the downloaded source into the target file
try:
with open(file_name, "w") as text_file:
text_file.write(
unicodedata.normalize('NFKD', GHgist.profile().content(id=gist_id)).encode('ascii', 'ignore'))
self.changeOwner(file_name, "storm", "storm")
except Exception as e:
api_logger.error(str(e))
raise e
if module_lang == "py":
# Time to jinja2
# Check module type
if module_type == "drain":
boltType = "drains"
dst_path = options.backend_java_path_drains
template_name = options.backend_template_path + "boltjava2python.tmpl"
elif module_type == "bolt":
boltType = "bolts"
dst_path = options.backend_java_path_bolts
template_name = options.backend_template_path + "boltjava2python.tmpl"
elif module_type == "spout":
boltType = "spouts"
dst_path = options.backend_java_path_spouts
template_name = options.backend_template_path + "spoutjava2python.tmpl"
env = Environment(loader=FileSystemLoader('/'))
template = env.get_template(template_name)
file_name = os.path.join(dst_path, module_name + ".java")
try:
with open(file_name, "w") as text_file:
text_file.write(
template.render(boltName=module_name, boltType=boltType, boltNamelowercase=module_name.lower()))
self.changeOwner(file_name, "storm", "storm")
except Exception as e:
api_logger.error(str(e))
raise e
return file_name
|
nilq/baby-python
|
python
|
import math as m
import numpy as np
from matplotlib import pyplot as plt
from BDPoisson1D import dirichlet_non_linear_poisson_solver_amr
from BDFunction1D import Function
from BDFunction1D.Functional import Functional
class TestFunction(Function):
"""
Some known differentiable function
"""
def evaluate_point(self, x):
return m.exp(-x * 3)
class TestFunctional(Functional):
def __init__(self, Nd, kT, f):
super(TestFunctional, self).__init__(f)
self.Nd = Nd
self.kT = kT
def evaluate_point(self, x):
return self.Nd(x) * (1 - (m.exp(-self.f.evaluate_point(x) / self.kT)))
class TestFunctionalDf(Functional):
def __init__(self, Nd, kT, f):
super(TestFunctionalDf, self).__init__(f)
self.Nd = Nd
self.kT = kT
def evaluate_point(self, x):
return self.Nd(x) / self.kT * m.exp(-self.f.evaluate_point(x) / self.kT)
Nd = lambda x: np.ones_like(x)
kT = 1 / 20
Psi = TestFunction()
f = TestFunctional(Nd, kT, Psi)
dfdPsi = TestFunctionalDf(Nd, kT, Psi)
start = 0.0
stop = 5.0
step = 0.5
bc1 = 1.0
bc2 = 0.0
solution = dirichlet_non_linear_poisson_solver_amr(start, stop, step, Psi, f, dfdPsi, bc1, bc2,
max_iter=1000, residual_threshold=1.5e-3,
int_residual_threshold=1.5e-4,
max_level=20, mesh_refinement_threshold=1e-7)
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
nodes = np.linspace(start, stop, num=int((stop-start)/step+1))
ax1.plot(nodes, solution.evaluate(nodes), '-')
ax2.plot(nodes, solution.error(nodes), '-')
plt.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import glob
for name in glob.glob('dir/*'):
    print(name)
|
nilq/baby-python
|
python
|
"""
Image conversion functions.
"""
# Copyright (c) 2020 Ben Zimmer. All rights reserved.
from typing import Tuple
import numpy as np
from PIL import Image
# Some functions for colorizing single channel black and white image (PIL "L" mode)
# or the alpha channels of text_scala output.
# ~~~~ function from text_scala
def colorize(img: np.ndarray, color: Tuple) -> np.ndarray:
"""colorize a single-channel (alpha) image into a 4-channel RGBA image"""
# ensure color to RGBA
if len(color) == 3:
color = (color[0], color[1], color[2], 255)
    # create result image filled with solid "color"
res = np.zeros((img.shape[0], img.shape[1], 4), dtype=np.ubyte)
res[:, :, 0:4] = color
# scale the alpha component by the image
# (this comes into play if "color" has alpha < 255)
res[:, :, 3] = color[3] / 255.0 * img
# set the RGB of completely transparent pixels to zero
res[res[:, :, 3] == 0, 0:3] = (0, 0, 0)
return res
# ~~~~ function from the old text module
# Pretty much the only difference between the two functions is the order of
# operations when scaling alpha; one could programmatically verify that both
# do the same thing.
def l_to_rgba(img: np.ndarray, color: Tuple) -> np.ndarray:
"""create a colorized transparent image from black and white"""
# create result image filled with solid "color"
height, width = img.shape
solid = Image.new("RGBA", (width, height), color)
res = np.array(solid)
# scale the alpha component by the image
# (this comes into play if "color" has alpha < 255)
res[:, :, 3] = res[:, :, 3] * (img / 255.0)
# set the RGB of completely transparent pixels to zero
res[res[:, :, 3] == 0, 0:3] = (0, 0, 0)
return res
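# Usage sketch: colorize a small alpha ramp with semi-transparent red.
if __name__ == "__main__":
    # 8x8 image whose alpha increases left to right
    alpha = np.tile(np.linspace(0, 255, 8, dtype=np.ubyte), (8, 1))
    rgba = colorize(alpha, (255, 0, 0, 128))
    print(rgba.shape)  # (8, 8, 4)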
|
nilq/baby-python
|
python
|
import pandas as pd
def generate_train(playlists):
# define category range
cates = {'cat1': (10, 50), 'cat2': (10, 78), 'cat3': (10, 100), 'cat4': (40, 100), 'cat5': (40, 100),
'cat6': (40, 100),'cat7': (101, 250), 'cat8': (101, 250), 'cat9': (150, 250), 'cat10': (150, 250)}
cat_pids = {}
for cat, interval in cates.items():
df = playlists[(playlists['num_tracks'] >= interval[0]) & (playlists['num_tracks'] <= interval[1])].sample(
n=1000)
cat_pids[cat] = list(df.pid)
playlists = playlists.drop(df.index)
playlists = playlists.reset_index(drop=True)
return playlists, cat_pids
def generate_test(cat_pids, playlists, interactions, tracks):
def build_df_none(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
def build_df_name(cat_pids, playlists, cat, num_samples):
df = playlists[playlists['pid'].isin(cat_pids[cat])]
df = df[['name', 'pid', 'num_tracks']]
df['num_samples'] = num_samples
df['num_holdouts'] = df['num_tracks'] - df['num_samples']
return df
df_test_pl = pd.DataFrame()
df_test_itr = pd.DataFrame()
df_eval_itr = pd.DataFrame()
for cat in list(cat_pids.keys()):
if cat == 'cat1':
num_samples = 0
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
# all interactions used for evaluation
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
df_eval_itr = pd.concat([df_eval_itr, df_itr])
# clean interactions for training
interactions = interactions.drop(df_itr.index)
print("cat1 done")
if cat == 'cat2':
num_samples = 1
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[df_itr['pos'] == 0]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat2 done")
if cat == 'cat3':
num_samples = 5
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat3 done")
if cat == 'cat4':
num_samples = 5
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat4 done")
if cat == 'cat5':
num_samples = 10
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat5 done")
if cat == 'cat6':
num_samples = 10
df = build_df_none(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat6 done")
if cat == 'cat7':
num_samples = 25
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat7 done")
if cat == 'cat8':
num_samples = 25
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
for pid in cat_pids[cat]:
df = df_itr[df_itr['pid'] == pid]
df_sample = df.sample(n=num_samples)
df_test_itr = pd.concat([df_test_itr, df_sample])
df = df.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df])
print("cat8 done")
if cat == 'cat9':
num_samples = 100
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
df_sample = df_itr[(df_itr['pos'] >= 0) & (df_itr['pos'] < num_samples)]
df_test_itr = pd.concat([df_test_itr, df_sample])
df_itr = df_itr.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df_itr])
print("cat9 done")
if cat == 'cat10':
num_samples = 100
df = build_df_name(cat_pids, playlists, cat, num_samples)
df_test_pl = pd.concat([df_test_pl, df])
df_itr = interactions[interactions['pid'].isin(cat_pids[cat])]
# clean interactions for training
interactions = interactions.drop(df_itr.index)
for pid in cat_pids[cat]:
df = df_itr[df_itr['pid'] == pid]
df_sample = df.sample(n=num_samples)
df_test_itr = pd.concat([df_test_itr, df_sample])
df = df.drop(df_sample.index)
df_eval_itr = pd.concat([df_eval_itr, df])
print("cat10 done")
tids = set(df_eval_itr['tid'])
df = tracks[tracks['tid'].isin(tids)]
df = df[['tid', 'arid']]
df_eval_itr = pd.merge(df_eval_itr, df, on='tid')
df_test_pl = df_test_pl.reset_index(drop=True)
df_test_itr = df_test_itr.reset_index(drop=True)
df_eval_itr = df_eval_itr.reset_index(drop=True)
interactions = interactions.reset_index(drop=True) # return as train_interactions
return df_test_pl, df_test_itr, df_eval_itr, interactions
def split_dataset(df_playlists, df_interactions, df_tracks):
"""
Split the MPD according to Challenge_set features
:param df_playlists: DataFrame from "playlists.csv"
:param df_interactions: DataFrame from "interactions.csv"
:param df_tracks: DataFrame from "tracks.csv"
:return: df_train_pl: a DataFrame with same shape as "playlists.csv" for training
df_train_itr: a DataFrame with same shape as "interactions.csv" for training
df_test_pl: a DataFrame of 10,000 incomplete playlists for testing
             df_test_itr: a DataFrame with same shape as "interactions.csv" for testing
df_eval_itr: a DataFrame of holdout interactions for evaluation
"""
df_train_pl, cat_pids = generate_train(df_playlists)
df_test_pl, df_test_itr, df_eval_itr, df_train_itr = generate_test(cat_pids, df_playlists, df_interactions, df_tracks)
return df_train_pl, df_train_itr, df_test_pl, df_test_itr, df_eval_itr
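# Usage sketch (assuming the three CSV dumps of the Million Playlist Dataset
# are available locally under these hypothetical file names):
#
#     df_playlists = pd.read_csv("playlists.csv")
#     df_interactions = pd.read_csv("interactions.csv")
#     df_tracks = pd.read_csv("tracks.csv")
#     train_pl, train_itr, test_pl, test_itr, eval_itr = split_dataset(
#         df_playlists, df_interactions, df_tracks)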
|
nilq/baby-python
|
python
|
'''
Copyright (C) 2018 PyElo.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import math
# Expected score of player A with rating 'rating_a' against player B with
# 'rating_b'.
def expected_score(rating_a, rating_b):
return 1.0 / (1.0 + 10.0 ** ((rating_b - rating_a) / 400.0))
# Change in rating based on expected and actual score.
def rating_delta(score, expected, k=20):
if k <= 0:
raise ValueError("k must be positive.")
return k * (score - expected)
# Update individual ratings after a 1v1 match. The pair of new ratings is
# returned as a tuple (new rating of player A, new rating of B). K factors may
# be individually set for both players.
def update_rating(rating_a, rating_b, score, k_a=20, k_b=20):
if k_a <= 0:
raise ValueError("k_a must be positive.")
if k_b <= 0:
raise ValueError("k_b must be positive.")
expected_a = expected_score(rating_a, rating_b)
expected_b = 1 - expected_a
rating_a += rating_delta(score, expected_a, k_a)
rating_b += rating_delta(1 - score, expected_b, k_b)
return (rating_a, rating_b)
# Expected score of team A against team B. Teams are a list of player ratings.
def expected_team_score(team_a, team_b):
if len(team_a) == 0:
raise ValueError("team_a must have at least one rating.")
if len(team_b) == 0:
raise ValueError("team_b must have at least one rating.")
return expected_score(sum(team_a), sum(team_b))
# Convert Elo ratings to the Bradley-Terry scale.
def elo_to_bt(elo_rating):
return 10.0 ** (elo_rating / 400.0)
# Update team ratings, where a team is a collection of ratings. The pair of new
# ratings is returned of (new ratings of team A, new ratings of team B) in the
# given order. K factors may be individually set for both teams.
def update_team_rating(team_a, team_b, score, k_a=20, k_b=20):
if k_a <= 0:
raise ValueError("k_a must be positive.")
if k_b <= 0:
raise ValueError("k_b must be positive.")
if len(team_a) == 0:
raise ValueError("team_a must have at least one rating.")
if len(team_b) == 0:
raise ValueError("team_b must have at least one rating.")
expected_a = expected_team_score(team_a, team_b)
expected_b = 1 - expected_a
delta_a = rating_delta(score, expected_a, k_a * len(team_a))
delta_b = rating_delta(1 - score, expected_b, k_b * len(team_b))
# Teams' ratings converted to the Bradley-Terry scale.
bt_team_a = [elo_to_bt(rating) for rating in team_a]
bt_team_b = [elo_to_bt(rating) for rating in team_b]
# Calculate normalization quotient.
norm_bt_team_a = sum(bt_team_a)
norm_bt_team_b = sum(bt_team_b)
# Normalize Bradley-Terry team ratings.
bt_team_a = [rating / norm_bt_team_a for rating in bt_team_a]
bt_team_b = [rating / norm_bt_team_b for rating in bt_team_b]
# Apply deltas in terms of normalized ratings.
team_a_delta = [delta_a * rating for rating in bt_team_a]
team_b_delta = [delta_b * rating for rating in bt_team_b]
# Return updated ratings.
return ([rating + delta for rating, delta in zip(team_a, team_a_delta)], [rating + delta for rating, delta in zip(team_b, team_b_delta)])
# Expected score in a match with multiple ranks.
def expected_rank_score(ranks):
if len(ranks) <= 1:
raise ValueError("The length of ranks must be 2 or greater.")
    return [sum(expected_score(rating, opp_rating) for j, opp_rating in enumerate(ranks) if i != j) for i, rating in enumerate(ranks)]
# Expected placing in a match with multiple ranks. Return values are not
# rounded to the nearest integer.
def expected_place(rating, opponent_ratings):
if len(opponent_ratings) == 0:
raise ValueError("opponent_ratings must have at least one rating.")
return 1 + len(opponent_ratings) - sum(expected_score(rating, opp_rating) for opp_rating in opponent_ratings)
# Update the rating of a ranking of players, where ranks is a list of ratings
# sorted by results: the first element of the list is 1st place, the second is
# 2nd place, and so on. Ratings are returned in the same order, and K factors
# may either be set for all players or individually for each player.
def update_rank_rating(ranks, k=20):
    if len(ranks) <= 1:
        raise ValueError("ranks must contain two or more ratings.")
    if type(k) is list:
        if len(k) != len(ranks):
            raise ValueError("The length of k must match the length of ranks, or a single k factor should be given.")
        # Check that all k factors are positive.
        if any(individual_k <= 0 for individual_k in k):
            raise ValueError("All k factors must be positive.")
    else:
        if k <= 0:
            raise ValueError("k must be positive.")
        # Use the same k factor for every player.
        k = [k] * len(ranks)
expected = expected_rank_score(ranks)
# Calculate k normalization quotient.
k_norm = len(ranks) - 1
scores = list(range(k_norm, -1, -1))
return [rating + rating_delta(score, individual_expected, individual_k / k_norm) for rating, score, individual_expected, individual_k in zip(ranks, scores, expected, k)]
# Get the base-2 entropy of a Bernoulli(p) distribution.
def bernoulli_entropy(p):
if p <= 0 or p >= 1:
raise ValueError("p must be greater than 0 and less than 1.")
return -(p * math.log2(p) + (1 - p) * math.log2(1 - p))
# Get the fairness of a match between player A and player B, with 0 being the
# least fair and 1 being the most fair.
def fairness(rating_a, rating_b):
return bernoulli_entropy(expected_score(rating_a, rating_b))
# Get the fairness of a match between team A and team B.
def fairness_team(team_a, team_b):
if len(team_a) == 0:
raise ValueError("team_a must have at least one rating.")
if len(team_b) == 0:
raise ValueError("team_b must have at least one rating.")
return bernoulli_entropy(expected_team_score(team_a, team_b))
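# A short, illustrative usage sketch of the functions above; the ratings are
# made up and the printed values are approximate.
if __name__ == '__main__':
    r_a, r_b = 1613, 1477
    print(expected_score(r_a, r_b))    # ~0.69: player A is favoured
    print(update_rating(r_a, r_b, 1))  # A wins: roughly (1619.3, 1470.7)
    print(fairness(r_a, r_b))          # ~0.90 on the 0-1 fairness scale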
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import unittest
from gilded_rose import Item, GildedRose
class GildedRoseTest(unittest.TestCase):
def test_foo_quality_never_below_zero(self):
items = [Item("foo", 0, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual("foo", items[0].name)
self.assertEqual(0, items[0].quality)
def test_foo_quality_decreases_by_one(self):
items = [Item("foo", 0, 1)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(0, items[0].quality)
def test_foo_quality_decreases_twice_as_fast_after_sell_date(self):
items = [Item("foo", -1, 2)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(0, items[0].quality)
def test_foo_sellin_decreases_by_one(self):
items = [Item("foo", 1, 1)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(0, items[0].sell_in)
def test_aged_brie_increases_in_quality(self):
items = [Item("Aged Brie", 1, 0)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(1, items[0].quality)
def test_aged_brie_increases_in_quality_up_to_50(self):
items = [Item("Aged Brie", 1, 50)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(50, items[0].quality)
def test_sulfuras_does_not_decrease_in_quality(self):
items = [Item("Sulfuras, Hand of Ragnaros", 1, 10)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(10, items[0].quality)
    def test_sulfuras_sellin_does_not_decrease(self):
items = [Item("Sulfuras, Hand of Ragnaros", 1, 1)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(1, items[0].sell_in)
def test_backstage_passes_quality_increases_by_two_ten_days_or_less(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 10, 3)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(5, items[0].quality)
def test_backstage_passes_quality_increases_by_three_five_days_or_less(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 5, 3)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(6, items[0].quality)
def test_backstage_passes_quality_drops_to_zero_after_concert(self):
items = [Item("Backstage passes to a TAFKAL80ETC concert", 0, 3)]
gilded_rose = GildedRose(items)
gilded_rose.update_quality()
self.assertEqual(0, items[0].quality)
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import gpt_2_simple as gpt2
import sys
# Use the first command-line argument as the generation prefix; fall back to
# a default prompt when none is given.
if len(sys.argv) > 1:
    prompt = sys.argv[1]
else:
    prompt = "ASCII Today - Fun with the Teletype Terminal"
print(prompt)
sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess)
single_text = gpt2.generate(
sess,
return_as_list=True,
temperature=0.75,
include_prefix=False,
truncate="<|endoftext|>",
    prefix=prompt
)[0]
print(single_text)
|
nilq/baby-python
|
python
|
# Please refrain from specifying a micro version if possible.
# --------------------------------------------------------------------------- #
VERSION = (1, 1)
# --------------------------------------------------------------------------- #
def _get_version(vt): # pragma: nocover # noqa
vt = tuple(map(str, vt)) # pragma: nocover # noqa
m = map(lambda v: v.startswith(('a', 'b', 'rc')), vt) # pragma: nocover # noqa
try: # pragma: nocover # noqa
i = next(i for i, v in enumerate(m) if v) # pragma: nocover # noqa
except StopIteration: # pragma: nocover # noqa
return '.'.join(vt) # pragma: nocover # noqa
    return '.'.join(vt[:i]) + ''.join(vt[i:])  # pragma: nocover # noqa
__version__ = _get_version(VERSION)
del _get_version
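# Illustration of the helper above (version tuples are hypothetical):
#   VERSION = (1, 2)           ->  __version__ == '1.2'
#   VERSION = (1, 2, 0)        ->  __version__ == '1.2.0'
#   VERSION = (1, 2, 'rc', 1)  ->  __version__ == '1.2rc1'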
from . import common # noqa
from .common import EncodingType # noqa
from . import asymmetric # noqa
from .asymmetric import * # noqa
from . import x509 # noqa
from .x509 import * # noqa
|
nilq/baby-python
|
python
|
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run some automations to test things"""
from __future__ import unicode_literals
from __future__ import print_function
import sys
import os.path
import time
try:
from pywinauto import application
except ImportError:
pywinauto_path = os.path.abspath(__file__)
pywinauto_path = os.path.split(os.path.split(pywinauto_path)[0])[0]
sys.path.append(pywinauto_path)
from pywinauto import application
import pywinauto
from pywinauto import tests
#from pywinauto.findbestmatch import MatchError
from pywinauto.timings import Timings
def run_notepad():
"""Run notepad and do some small stuff with it"""
print("Run with option 'language' e.g. notepad_fast.py language to use")
print("application data. This should work on any language Windows/Notepad")
print()
print("Trying fast timing settings - it's possible these won't work")
print("if pywinauto tries to access a window that is not accessible yet")
# use fast timings - but allow to wait for windows a long time
Timings.fast()
Timings.window_find_timeout = 10
start = time.time()
run_with_appdata = False
if len(sys.argv) > 1 and sys.argv[1].lower() == 'language':
run_with_appdata = True
scriptdir = os.path.split(os.path.abspath(__file__))[0]
if run_with_appdata:
print("\nRunning this script so it will load application data and run")
print("against any lanuguage version of Notepad/Windows")
# make sure that the app data gets read from the same folder as
# the script
app = application.Application(
os.path.join(scriptdir, "Notepad_fast.pkl"))
else:
app = application.Application()
        ## for distribution we don't want to connect to anybody's application
## because we may mess up something they are working on!
#try:
# app.connect_(path = r"c:\windows\system32\notepad.exe")
#except application.ProcessNotFoundError:
# app.start_(r"c:\windows\system32\notepad.exe")
app.start(r"notepad.exe")
app.Notepad.menu_select("File->PageSetup")
# ----- Page Setup Dialog ----
# Select the 4th combobox item
app.PageSetupDlg.SizeComboBox.select(4)
# Select the 'Letter' combobox item or the Letter
try:
app.PageSetupDlg.SizeComboBox.select("Letter")
except ValueError:
app.PageSetupDlg.SizeComboBox.select('Letter (8.5" x 11")')
app.PageSetupDlg.SizeComboBox.select(2)
# run some tests on the Dialog. List of available tests:
# "AllControls",
# "AsianHotkey",
# "ComboBoxDroppedHeight",
# "CompareToRefFont",
# "LeadTrailSpaces",
# "MiscValues",
# "Missalignment",
# "MissingExtraString",
# "Overlapping",
# "RepeatedHotkey",
# "Translation",
# "Truncation",
bugs = app.PageSetupDlg.run_tests('RepeatedHotkey Truncation')
# if there are any bugs they will be printed to the console
# and the controls will be highlighted
tests.print_bugs(bugs)
# ----- Next Page Setup Dialog ----
app.PageSetupDlg.Printer.click()
# do some radio button clicks
# Open the Connect to printer dialog so we can
# try out checking/unchecking a checkbox
app.PageSetupDlg.Network.click()
# ----- Connect To Printer Dialog ----
# Select a checkbox
app.ConnectToPrinter.ExpandByDefault.check()
app.ConnectToPrinter.ExpandByDefault.uncheck()
# try doing the same by using click
app.ConnectToPrinter.ExpandByDefault.click()
app.ConnectToPrinter.ExpandByDefault.click()
# close the dialog
app.ConnectToPrinter.Cancel.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.Properties.click()
doc_props = app.window(name_re=".*Properties$")
doc_props.wait('exists', timeout=40)
#
# # ----- Document Properties Dialog ----
# # some tab control selections
# # Two ways of selecting tabs with indices...
# doc_props.TabCtrl.select(0)
# doc_props.TabCtrl.select(1)
# try:
# doc_props.TabCtrl.select(2)
# except IndexError:
# # not all users have 3 tabs in this dialog
# pass
#
# # or with text...
# #doc_props.TabCtrl.select("PaperQuality")
# doc_props.TabCtrl.select(1)
#
# try:
# #doc_props.TabCtrl.select("JobRetention")
# doc_props.TabCtrl.select("3")
# except MatchError:
# # some people do not have the "Job Retention" tab
# pass
#
# doc_props.TabCtrl.select("Finishing")
# #doc_props.TabCtrl.select(0)
#
# # do some radio button clicks
# doc_props.RotatedLandscape.click()
# doc_props.BackToFront.click()
# doc_props.FlipOnShortEdge.click()
#
# doc_props.Portrait.click()
# doc_props._None.click()
# #doc_props.FrontToBack.click()
#
# # open the Advanced options dialog in two steps
# advbutton = doc_props.Advanced
# advbutton.click()
#
# # close the 4 windows
#
# # ----- Advanced Options Dialog ----
# app.window(name_re = ".* Advanced Options").Ok.click()
# ----- Document Properties Dialog again ----
doc_props.Cancel.close_click()
# for some reason my current printer driver
# window does not close cleanly :(
if doc_props.Cancel.Exists():
doc_props.OK.close_click()
# ----- 2nd Page Setup Dialog again ----
app.PageSetupDlg.OK.close_click()
# ----- Page Setup Dialog ----
app.PageSetupDlg.Ok.close_click()
# type some text - note that extended characters ARE allowed
app.Notepad.Edit.set_edit_text(u"I am typing s\xe4me text to Notepad\r\n\r\n"
"And then I am going to quit")
app.Notepad.Edit.right_click()
app.Popup.menu_item("Right To Left Reading Order").click()
#app.PopupMenu.menu_select("Paste", app.Notepad.ctrl_())
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select(
# "Right To Left Reading Order", app.Notepad.ctrl_())
#app.PopupMenu.menu_select(
# "Show unicode control characters", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select("Right To Left Reading Order", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.right_click()
#app.PopupMenu.menu_select(
# "Insert Unicode control character -> IAFS", app.Notepad.ctrl_())
#time.sleep(1)
#app.Notepad.Edit.type_keys("{ESC}")
# the following shows that Sendtext does not accept
# accented characters - but does allow 'control' characters
app.Notepad.Edit.type_keys(u"{END}{ENTER}SendText d\xf6\xe9s "
u"s\xfcpp\xf4rt \xe0cce\xf1ted characters!!!", with_spaces = True)
# Try and save
app.Notepad.menu_select("File->SaveAs")
app.SaveAs.EncodingComboBox.select("UTF-8")
app.SaveAs.FileNameEdit.set_edit_text("Example-utf8.txt")
app.SaveAs.Save.close_click()
# my machine has a weird problem - when connected to the network
# the SaveAs Dialog appears - but doing anything with it can
# cause a LONG delay - the easiest thing is to just wait
# until the dialog is no longer active
# - Dialog might just be gone - because click worked
# - dialog might be waiting to disappear
# so can't wait for next dialog or for it to be disabled
# - dialog might be waiting to display message box so can't wait
# for it to be gone or for the main dialog to be enabled.
    # while the dialog exists wait up to 30 seconds (and yes it can
# take that long on my computer sometimes :-( )
app.SaveAsDialog2.Cancel.wait_not('enabled')
# If file exists - it asks you if you want to overwrite
try:
app.SaveAs.Yes.wait('exists').close_click()
except pywinauto.MatchError:
print('Skip overwriting...')
# exit notepad
app.Notepad.menu_select("File->Exit")
if not run_with_appdata:
app.WriteAppData(os.path.join(scriptdir, "Notepad_fast.pkl"))
print("That took %.3f to run"% (time.time() - start))
if __name__ == "__main__":
run_notepad()
|
nilq/baby-python
|
python
|
# Make sure to have CoppeliaSim running, with the following scene loaded:
#
# scenes/messaging/ikMovementViaRemoteApi.ttt
#
# Do not launch simulation, then run this script
from zmqRemoteApi import RemoteAPIClient
print('Program started')
client = RemoteAPIClient()
sim = client.getObject('sim')
tipHandle = sim.getObject('/LBR4p/tip')
targetHandle = sim.getObject('/LBR4p/target')
# Set-up some movement variables:
maxVel = 0.1
maxAccel = 0.01
maxJerk = 80
# Start simulation:
sim.startSimulation()
def cb(pose,vel,accel,handle):
sim.setObjectPose(handle,-1,pose)
# Send movement sequences:
initialPose = sim.getObjectPose(tipHandle,-1)
targetPose = [0, 0, 0.85, 0, 0, 0, 1]
sim.moveToPose(-1,initialPose,[maxVel],[maxAccel],[maxJerk],targetPose,cb,targetHandle,[1,1,1,0.1])
targetPose = [
0, 0, 0.85,
-0.7071068883, -6.252754758e-08, -8.940695295e-08, -0.7071067691
]
sim.moveToPose(-1,sim.getObjectPose(tipHandle,-1),[maxVel],[maxAccel],[maxJerk],targetPose,cb,targetHandle,[1,1,1,0.1])
sim.moveToPose(-1,sim.getObjectPose(tipHandle,-1),[maxVel],[maxAccel],[maxJerk],initialPose,cb,targetHandle,[1,1,1,0.1])
sim.stopSimulation()
print('Program ended')
|
nilq/baby-python
|
python
|
import attr
from .document import Document
from .has_settings import HasSettings
from .templated import Templated
import exam_gen.util.logging as logging
log = logging.new(__name__, level="DEBUG")
@attr.s
class GradeData():
points = attr.ib(default=None)
children = attr.ib(factory=dict)
comment = attr.ib(default=None, kw_only = True)
ungraded_points = attr.ib(default=None, init=False)
weighted_points = attr.ib(default=None, init=False)
total_weight = attr.ib(default=None, init=False)
@property
def percent_grade(self):
return (self.weighted_points / self.total_weight)
@property
def percent_ungraded(self):
return (self.ungraded_points / self.total_weight)
    @staticmethod
    def normalize(data):
        if isinstance(data, GradeData):
            return data
        elif isinstance(data, dict):
            return GradeData(children=data)
        else:
            return GradeData(points=data)
    def merge(self, other):
        other = GradeData.normalize(other)
        if other.points is not None:
            self.points = other.points
        if other.comment is not None:
            self.comment = other.comment
        for (name, child) in other.children.items():
            if name in self.children:
                self.children[name] = GradeData.normalize(
                    self.children[name]).merge(child)
            else:
                self.children[name] = GradeData.normalize(child)
        return self
@attr.s
class Gradeable(Templated):
_weight = attr.ib(default=None, kw_only=True)
_points = attr.ib(default=None, init=False)
_comment = attr.ib(default=None, init=False)
settings.new_group(
"grade", doc=
"""
Settings covering how grades are managed for this problem.
""")
settings.grade.new_value(
"max_points", default=1, doc=
"""
The maximum number of points that can be assigned to problem
""")
settings.grade.new_value(
"weight", default=None, doc=
"""
The weight of this problem relative to others in exam. If `None`, this
is assumed to be the same as `settings.grade.max_points`.
""")
def __attrs_post_init__(self):
if hasattr(super(Gradeable,self), '__attrs_post_init__'):
super(Gradeable,self).__attrs_post_init__()
# stupid way of sneaking an init parameter into the settings
        if self._weight is not None:
self.settings.grade.weight = self._weight
# need this for a semi-responsive default setting
        if self.settings.grade.weight is None:
self.settings.grade.weight = self.settings.grade.max_points
def set_points(self, points, comment=None):
if len(self.questions) > 0:
raise RuntimeError("Cannot assign grade to doc with sub-questions")
        if points is not None:
self._points = points
if self._points > self.settings.grade.max_points:
raise RuntimeError("Assigned grade larger than max_points allowed")
if comment != None:
self._comment = comment
@property
def ungraded(self):
        return self._points is None
@property
def percent_grade(self):
"""
returns a grade from between 0 and 1
"""
return (self._points / self.settings.grade.max_points)
@property
def weighted_grade(self):
"""
returns a grade after weighting
"""
return (self.settings.grade.weight * self.percent_grade)
@property
def total_weight(self):
return self.settings.grade.weight
def build_template_spec(self, build_info):
spec = super(Gradeable, self).build_template_spec(
build_info)
        grades = dict()
        if self._points is not None:
            grades['points'] = self._points
        if self._comment is not None:
            grades['comment'] = self._comment
        if grades:
            spec.context['grade'] = grades
return spec
def distribute_scores(obj, grades):
    """
    Takes a document and distributes the grade information in a
    `GradeData` to its children.
    """
    # Check if valid
    if not isinstance(obj, Document):
        raise RuntimeError("Can't distribute grades to non-document")
    # for convenience allow the user to supply grades or points directly
    grades = GradeData.normalize(grades)
    # Copy out basic grades
    if isinstance(obj, Gradeable):
        obj.set_points(grades.points, comment=grades.comment)
    elif grades.points is not None:
        raise RuntimeError("Trying to set grade on non-gradeable doc.")
    # apply to children
    for (name, sub_q) in obj.questions.items():
        if name in grades.children:
            distribute_scores(sub_q, grades.children[name])
    # get extra keys and throw error if any
    extra = [k for k in grades.children.keys() if k not in obj.questions]
    if len(extra) != 0:
        raise RuntimeError(
            "Tried to supply grades for non-existent children: {}".format(
                extra
            ))
def collect_grades(obj):
"""
Goes through a document and gathers the grade info from all the
sub-elements, keeping track of grade and weight
"""
grade_data = GradeData()
# check if valid
if not isinstance(obj, Document):
raise RuntimeError("Can't gather grades from non-document")
if isinstance(obj, Gradeable):
grade_data.points = obj._points
grade_data.comment = obj._comment
# Either sum up the information from the sub-questions
if len(obj.questions) != 0:
grade_data.ungraded_points = 0
grade_data.weighted_points = 0
grade_data.total_weight = 0
for (name, sub_q) in obj.questions.items():
sub_data = collect_grades(sub_q)
grade_data.children[name] = sub_data
grade_data.total_weight += sub_data.total_weight
grade_data.ungraded_points += sub_data.ungraded_points
grade_data.weighted_points += sub_data.weighted_points
# or just use the leaf question's data
else:
grade_data.total_weight = obj.total_weight
if obj.ungraded:
grade_data.weighted_points = 0
grade_data.ungraded_points = obj.total_weight
else:
grade_data.weighted_points = obj.weighted_grade
grade_data.ungraded_points = 0
return grade_data
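# A minimal, illustrative sketch of GradeData merging; the relative imports
# above mean this module is not meant to be run directly.
if __name__ == '__main__':
    base = GradeData.normalize({"q1": GradeData(points=3)})
    update = GradeData.normalize({"q1": GradeData(points=4, comment="regrade")})
    base.merge(update)
    print(base.children["q1"].points)   # 4
    print(base.children["q1"].comment)  # "regrade"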
|
nilq/baby-python
|
python
|
import sys
def op(arg1, arg2):
if (len(sys.argv) != 3):
raise Exception("InputError: only numbers\n\n")
if (arg1.isdigit() and arg2.isdigit()):
arg1 = int(arg1)
arg2 = int(arg2)
else:
raise Exception("InputError: only numbers\n\n")
print("Sum: ", arg1 + arg2)
print("Difference: ", arg1 - arg2)
print("Product: ", arg1 * arg2)
    try:
        print("Quotient: ", arg1 / arg2)
    except Exception as e:
        print("Quotient: ERROR (", e, ")")
    try:
        print("Remainder: ", arg1 % arg2)
    except Exception as e:
        print("Remainder: ERROR (", e, ")")
try:
op(sys.argv[1], sys.argv[2])
except IndexError:
print("Usage: python3 operations.py <number1> <number2> Example:\n\tpython3 operations.py 10 3")
except Exception as e:
print(e, "Usage: python3 operations.py <number1> <number2> Example:\n\tpython3 operations.py 10 3")
|
nilq/baby-python
|
python
|
def xprop(layout, data, prop, enabled=True, **kwargs):
attrs = getattr(data.bl_rna, prop)[1]
name = attrs.get('name', prop)
lay = layout.row().split(percentage=0.33)
lay.label(name + ':')
lay = lay.row(align=True)
lay_l = lay.row(align=True)
lay_r = lay
if not enabled:
lay = lay.split(align=True)
lay.enabled = False
lay.prop(data, prop, text='', **kwargs)
return lay_l, lay_r
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='pyledsign',
version='1.01',
description='pyledsign - control led signs from python',
author='Kerry Schwab',
author_email='sales@brightsigns.com',
url='http://www.python.org/tbd/',
packages=['pyledsign'],
)
|
nilq/baby-python
|
python
|
from django.conf import settings
from django.http import Http404
from django.shortcuts import redirect, render
from .models import Link
def redirect_(request, key):
try:
link = Link.find_by_key(key.lower())
except Link.DoesNotExist:
raise Http404("Link does not exist.")
return redirect(link.url, permanent=settings.PERMANENT_REDIRECT)
def homepage(request):
return render(request, "homepage.html")
|
nilq/baby-python
|
python
|
# Generated by Django 2.1 on 2018-08-08 04:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Email',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.PositiveSmallIntegerField(choices=[(1, 'Pending'), (2, 'Sent'), (3, 'Failed'), (4, 'Cancelled')], default=1)),
('status_updated', models.DateTimeField()),
('queued_until', models.DateTimeField(blank=True, null=True)),
('email_type', models.CharField(max_length=191)),
('sent_from', models.CharField(max_length=255)),
('subject', models.CharField(max_length=255)),
('recipients', models.TextField()),
('cc_to', models.TextField(blank=True, default='')),
('bcc_to', models.TextField(blank=True, default='')),
('reply_to', models.TextField(blank=True, default='')),
('text', models.TextField()),
('html', models.TextField(blank=True, default='')),
('error_message', models.TextField(blank=True, default='')),
('task_scheduler_id', models.CharField(blank=True, db_index=True, default='', editable=False, max_length=255)),
('related_obj_id', models.PositiveIntegerField(blank=True, editable=False, null=True)),
('related_obj_content_type', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType')),
],
options={
'ordering': ('-status_updated',),
},
),
]
|
nilq/baby-python
|
python
|
"""Tests the DNC class implementation."""
import sonnet as snt
import tensorflow as tf
import unittest
from numpy.testing import assert_array_equal
from .. dnc import dnc
def suite():
"""Create testing suite for all tests in this module."""
suite = unittest.TestSuite()
suite.addTest(DNCTest('test_construction'))
return suite
class DNCTest(unittest.TestCase):
"""Tests for the DNC class."""
def test_construction(self):
"""Test the construction of a DNC."""
output_size = 10
d = dnc.DNC(output_size)
self.assertIsInstance(d, dnc.DNC)
def test_build(self):
"""Test the build of the DNC."""
graph = tf.Graph()
with graph.as_default():
with tf.Session(graph=graph) as sess:
output_size = 10
memory_size = 20
word_size = 8
num_read_heads = 3
hidden_size = 1
tests = [{ # batch_size = 1
'input': [[1, 2, 3]],
'batch_size': 1
}, { # batch_size > 1
'input': [[1, 2, 3], [4, 5, 6]],
'batch_size': 2,
}, { # can handle 2D input with batch_size > 1
'input': [[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[9, 8, 7],
[6, 5, 4],
[3, 2, 1]]],
'batch_size': 2,
}, { # 3D input with batch_size > 1
'input': [[[[1], [2]], [[3], [4]]],
[[[5], [6]], [[7], [8]]]],
'batch_size': 2,
}]
for test in tests:
i = tf.constant(test['input'], dtype=tf.float32)
batch_size = test['batch_size']
d = dnc.DNC(
output_size,
memory_size=memory_size,
word_size=word_size,
num_read_heads=num_read_heads,
hidden_size=hidden_size)
prev_state = d.initial_state(batch_size, dtype=tf.float32)
output_vector, dnc_state = d(i, prev_state)
assert_array_equal([batch_size, output_size],
sess.run(tf.shape(output_vector)))
assert_array_equal(
[batch_size, num_read_heads, word_size],
sess.run(tf.shape(dnc_state.read_vectors)))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
nilq/baby-python
|
python
|
# Source : https://leetcode.com/problems/lowest-common-ancestor-of-a-binary-tree/
# Author : henrytine
# Date : 2020-08-19
#####################################################################################################
#
# Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
#
# According to the definition of LCA on Wikipedia: "The lowest common ancestor is defined between two
# nodes p and q as the lowest node in T that has both p and q as descendants (where we allow a node
# to be a descendant of itself)."
#
# Given the following binary tree: root = [3,5,1,6,2,0,8,null,null,7,4]
#
# Example 1:
#
# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 1
# Output: 3
# Explanation: The LCA of nodes 5 and 1 is 3.
#
# Example 2:
#
# Input: root = [3,5,1,6,2,0,8,null,null,7,4], p = 5, q = 4
# Output: 5
# Explanation: The LCA of nodes 5 and 4 is 5, since a node can be a descendant of itself according to
# the LCA definition.
#
# Note:
#
# All of the nodes' values will be unique.
# p and q are different and both values will exist in the binary tree.
#
#####################################################################################################
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if root in (None, p, q): return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left is None:
return right
elif right is None:
return left
else:
return root
# return self.helper(root, p, q)
# def helper(self, node, p, q):
# if node in (None, p, q):
# return node
# left = self.helper(node.left, p, q)
# right = self.helper(node.right, p, q)
# if left is None:
# return right
# elif right is None:
# return left
# else:
# return node
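# A small driver for the solution above, building the example tree from the
# problem statement; TreeNode matches the commented-out definition.
class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
if __name__ == '__main__':
    root = TreeNode(3)
    root.left, root.right = TreeNode(5), TreeNode(1)
    root.left.left, root.left.right = TreeNode(6), TreeNode(2)
    root.right.left, root.right.right = TreeNode(0), TreeNode(8)
    root.left.right.left, root.left.right.right = TreeNode(7), TreeNode(4)
    sol = Solution()
    print(sol.lowestCommonAncestor(root, root.left, root.right).val)             # 3
    print(sol.lowestCommonAncestor(root, root.left, root.left.right.right).val)  # 5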
|
nilq/baby-python
|
python
|
import pymysql
import urllib.request
from bs4 import BeautifulSoup
import requests
def connectDatabase():
"""Create database connection"""
global db
db = pymysql.connect(host='localhost', user='root', password='',
db='vg_dapi', cursorclass=pymysql.cursors.DictCursor,charset='utf8')
def getappid(appid_games_list, name):
""" Function responsable to get the App ID of a game, given a name"""
for i in appid_games_list:
if i['name'] == name:
print(name + " App ID: " + str(i['appid']))
return i['appid']
def getgameinfo(urlsteam, appid, vgnamesteam):
    """Scrape the Steam store page for review scores and store them in the DB."""
    pageurl = urllib.request.Request(urlsteam + str(appid))
#Query the website and return the html to the variable 'page'
page = urllib.request.urlopen(pageurl)
#Parse the html in the 'page' variable, and store it in Beautiful Soup format
soup = BeautifulSoup(page, "lxml")
reviews = soup.find('span', class_='nonresponsive_hidden responsive_reviewdesc')
if reviews is None:
pass
else:
vgsteamscores_list = [appid, reviews.text, vgnamesteam]
vgsteamscores_sql = "UPDATE `gameplatform` SET `steamID` = %s, `steam_score` = %s WHERE (SELECT `id` FROM `game` WHERE `name` = %s) = `gameID`"
cur.execute(vgsteamscores_sql, vgsteamscores_list)
db.commit()
if __name__ == '__main__':
url = "http://store.steampowered.com/app/"
    # request responsible for returning a json object with all the steam games
r = requests.get('https://api.steampowered.com/ISteamApps/GetAppList/v2/')
#store appID and Names of the games into a List
gameslist = r.json()['applist']['apps']
connectDatabase()
cur = db.cursor()
cur.execute("SELECT name FROM game")
vgnames_list = cur.fetchall()
    for vgname in vgnames_list:
        # look the App ID up once and skip games that are not on Steam
        appidgame = getappid(gameslist, vgname['name'])
        if appidgame is not None:
            getgameinfo(url, appidgame, vgname['name'])
|
nilq/baby-python
|
python
|
from mycroft import MycroftSkill, intent_file_handler
class RoomBooking(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('booking.room.intent')
def handle_booking_room(self, message):
amount = message.data.get('amount')
building = message.data.get('building')
time = message.data.get('time')
self.speak_dialog('booking.room', data={
'time': time,
'amount': amount,
'building': building
})
def create_skill():
return RoomBooking()
|
nilq/baby-python
|
python
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
class DialogEvent:
def __init__(self, bubble: bool = False, name: str = "", value: object = None):
self.bubble = bubble
self.name = name
self.value: object = value
|
nilq/baby-python
|
python
|
import traceback
from twisted.internet import reactor
def stack():
    print("The Python Stack.")
    traceback.print_stack()
    # Stop the reactor once the stack has been printed so the script exits.
    reactor.stop()
reactor.callWhenRunning(stack)
reactor.run()
|
nilq/baby-python
|
python
|
import os
import sys
import codecs
import difflib
sys.path.insert(0, os.path.dirname(__file__))
from logger import log
def restore_file_case(text_file, orig_file, debug=False):
text_io = codecs.open(text_file, 'r', encoding='utf8')
orig_io = codecs.open(orig_file, 'r', encoding='utf8')
for line in text_io:
orig_line = orig_io.next()
result = restore_sentence_case(line.strip(), orig_line.strip(), debug)
assert result.lower() == line.strip().lower(), \
"Case restoration changed a sentence!\n{}\n{}" \
.format(line.strip(), result)
yield result.encode('utf8', 'replace')
text_io.close()
orig_io.close()
def restore_sentence_case(sent, orig_sent, debug=False):
if debug and sent != orig_sent:
log.debug(u'toks: {}'.format(sent).encode('utf8', 'replace'))
log.debug(u'orig: {}'.format(orig_sent).encode('utf8', 'replace'))
toks = sent.split()
orig_toks = orig_sent.split()
lc_toks = [tok.lower() for tok in toks]
lc_orig_toks = [tok.lower() for tok in orig_toks]
matcher = difflib.SequenceMatcher(None, lc_toks, lc_orig_toks)
new_toks = []
for tag, i1, i2, j1, j2 in matcher.get_opcodes():
if debug and tag != 'equal' and sent != orig_sent:
log.debug(u" {}: ({},{}) '{}' -> ({},{}) '{}'" \
.format(tag,
i1, i2, ' '.join(toks[i1:i2]),
j1, j2, ' '.join(orig_toks[j1:j2])) \
.encode('utf8', 'replace'))
if tag == 'equal':
new_toks += orig_toks[j1:j2]
elif tag == 'replace':
word = ' '.join(toks[i1:i2])
orig_word = ' '.join(orig_toks[j1:j2])
new_toks += [restore_word_case(word, orig_word)]
elif tag == 'delete':
if i1 == 0:
tmp = toks[i1:i2]
if is_capitalized(orig_toks[0]):
orig_toks[0] = orig_toks[0].lower()
tmp[0] = tmp[0].capitalize()
elif is_uppercased(orig_toks[0]):
tmp[0] = tmp[0].capitalize()
new_toks += tmp
else:
new_toks += toks[i1:i2]
elif tag == 'insert':
if i1 == 0 and is_capitalized(orig_toks[j1]) and \
is_lowercased(orig_toks[j2]):
orig_toks[j2] = orig_toks[j2].capitalize()
new_sent = ' '.join(new_toks)
if debug and sent != orig_sent:
log.debug("sent: {}".format(new_sent))
return new_sent
def restore_word_case(tok, orig_tok):
if tok.lower() == orig_tok.lower():
return orig_tok
if is_lowercased(orig_tok):
return tok.lower()
elif is_uppercased(orig_tok):
return tok.upper()
elif is_capitalized(orig_tok):
return tok.capitalize()
else:
return tok
def is_lowercased(tok):
return tok == tok.lower()
def is_uppercased(tok):
return tok == tok.upper()
def is_capitalized(tok):
return tok == tok.capitalize()
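# A small illustrative check of restore_sentence_case; the inputs are made up.
if __name__ == '__main__':
    # equal tokens keep the original casing
    print(restore_sentence_case(u'i saw mr. smith', u'I saw mr. Smith'))
    # -> u'I saw mr. Smith'
    # a replaced token adopts the original token's casing style
    print(restore_sentence_case(u'he said hi', u'He said HELLO'))
    # -> u'He said HI'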
|
nilq/baby-python
|
python
|
"""Test for our weighted graph."""
# {'A': {'B': 7, 'C': 9}, 'B': {'D': 2, 'E': 4}, 'C': {'F':6}}
"""Test our graph implementation."""
import pytest
from weighted_graph import Weighted
@pytest.fixture
def new_weighted_graph():
"""Graph for testing."""
from weighted_graph import Weighted
empty_graph = Weighted()
return empty_graph
@pytest.fixture
def graph_no_edges():
"""Test graph with nodes only."""
from weighted_graph import Weighted
example_graph = Weighted()
example_graph.add_node('BB')
example_graph.add_node(82)
example_graph.add_node(99)
example_graph.add_node('AA')
return example_graph
@pytest.fixture
def graph_with_edges():
"""Test graph with nodes only."""
from weighted_graph import Weighted
new_graph = Weighted()
new_graph.add_node('A')
new_graph.add_node('B')
new_graph.add_node('C')
new_graph.add_node('D')
new_graph.add_node('E')
new_graph.add_node('F')
new_graph.add_edge('A', 'B', 7)
new_graph.add_edge('A', 'C', 9)
new_graph.add_edge('B', 'D', 2)
new_graph.add_edge('B', 'E', 4)
new_graph.add_edge('C', 'F', 6)
return new_graph
def test_graph_init_no_values_taken():
"""Ensure we raise an error if we try to init with a value."""
from weighted_graph import Weighted
with pytest.raises(TypeError):
a_graph = Weighted(2)
def test_graph_init_success(new_weighted_graph):
"""Ensure our new graph is in fact a graph."""
assert isinstance(new_weighted_graph, Weighted)
def test_graph_adds_and_lists_nodes(graph_no_edges):
"""Ensure we get list of nodes."""
listy = ['BB', 82, 99, 'AA']
for node in listy:
assert node in graph_no_edges.nodes()
def test_graph_adds_nodes_and_edges(graph_no_edges):
"""Ensure we add edges to the nodes."""
graph_no_edges.add_edge('Louisiana Crawfish', 'WA Invasive Species', 3)
assert graph_no_edges.edges() == [(
'Louisiana Crawfish', 'WA Invasive Species', 3)]
def test_graph_lists_adds_and_lists_edges(graph_no_edges):
"""Ensure we add edges to the nodes."""
graph_no_edges.add_edge(82, 34, 4)
graph_no_edges.add_edge(99, 'AA', 6)
assert (82, 34, 4) in graph_no_edges.edges()
assert (99, 'AA', 6) in graph_no_edges.edges()
def test_graph_deletes_nodes(graph_with_edges):
"""Ensure we can delete a node."""
graph_with_edges.del_nodes('B')
listy = ['A', 'C', 'D', 'E', 'F']
for node in listy:
assert node in graph_with_edges.nodes()
assert 'B' not in graph_with_edges.nodes()
def test_graph_cant_delete_an_unpresent_node(graph_no_edges):
"""Ensure we can't delete that doesn't exist."""
with pytest.raises(ValueError):
graph_no_edges.del_nodes(3.14)
def test_graph_cant_delete_without_argument(graph_no_edges):
"""Ensure we can't delete without an argument."""
with pytest.raises(TypeError):
graph_no_edges.del_nodes()
def test_del_some_edges(graph_with_edges):
"""Ensure we delete edges."""
graph_with_edges.del_edges('A', 'B')
assert graph_with_edges['A'] == {'C': 9}
def test_cant_delete_nonexistent_edge(graph_with_edges):
"""Ensure we can't delete a nonexistent edge."""
with pytest.raises(KeyError):
graph_with_edges.del_edges('BB', 'Badgers')
def test_nodes_exist(graph_no_edges):
"""Ensure we can assert nodes are in a graph."""
for node in graph_no_edges:
assert graph_no_edges.has_node(node)
def test_false_if_no_node(graph_no_edges):
"""Ensure we get false."""
false_nodes = ['land submarine', 'Portland Timbers', 'tug cable scope', 100]
for node in false_nodes:
assert graph_no_edges.has_node(node) is False
def test_node_neighbors(graph_no_edges):
"""Ensure we get the right neighbors for a node."""
graph_no_edges.add_edge('BB', 82, 5)
assert graph_no_edges.neighbors('BB') == {82: 5}
def test_node_without_neighbors(graph_no_edges):
"""Ensure we get None back for neighbors."""
assert graph_no_edges.neighbors(99) == {}
def test_node_error_if_nonpresent(graph_no_edges):
"""Can not get neighbors of nonpresent node."""
with pytest.raises(ValueError):
graph_no_edges.adjacent('Raccoon', 'Rocket')
def test_adjacent_nodes(graph_with_edges):
"""Ensure we get adjacent edges."""
assert graph_with_edges.adjacent('A', 'B')
def test_adjacent_none(graph_with_edges):
"""Ensure we get false."""
assert graph_with_edges.adjacent('B', 'A') is False
def test_adjacent_unpresent(graph_with_edges):
"""Ensure we get an error."""
with pytest.raises(ValueError):
graph_with_edges.adjacent('Captain Picard', 'Star Wars')
def test_add_node_value_error_val_exists(graph_no_edges):
"""Ensure a value is not added twice."""
with pytest.raises(ValueError):
graph_no_edges.add_node('BB')
def test_del_edges_has_no_edges_to_delete(graph_with_edges):
"""Ensure there are no edges to delete."""
with pytest.raises(KeyError):
graph_with_edges.del_edges('F', 'G')
def test_neighbors_value_error_not_in_graph(graph_with_edges):
"""Ensure the value error raises if no neighbors."""
with pytest.raises(ValueError):
graph_with_edges.neighbors('G')
@pytest.fixture
def dijkstra_alg():
"""Test dijkstra method."""
from weighted_graph import Weighted
new_graph = Weighted()
new_graph.add_node('0')
new_graph.add_node('1')
new_graph.add_node('2')
new_graph.add_node('3')
new_graph.add_node('4')
new_graph.add_node('5')
new_graph.add_edge('0', '1', 1)
new_graph.add_edge('0', '2', 7)
new_graph.add_edge('1', '3', 9)
new_graph.add_edge('1', '5', 15)
new_graph.add_edge('2', '4', 4)
new_graph.add_edge('3', '5', 5)
new_graph.add_edge('3', '4', 10)
new_graph.add_edge('4', '5', 3)
return new_graph
def test_new_graph_returns_path_to_nodes(dijkstra_alg):
"""Test that the key value pairs are correct."""
assert dijkstra_alg.dijkstra('0') == {'1': 1, '2': 7, '3': 10, '4': 11, '5': 14}
def test_new_graph_returns_path_to_other_nodes(graph_with_edges):
"""Test that the key value pairs are correct."""
assert graph_with_edges.dijkstra('A') == {'B': 7, 'C': 9, 'D': 9, 'E': 11, 'F': 15}
def test_graph_with_nodes_pointing_at_each_other():
"""."""
from weighted_graph import Weighted
new_weighted = Weighted()
new_weighted.add_node('A')
new_weighted.add_node('B')
new_weighted.add_node('C')
new_weighted.add_node('D')
new_weighted.add_node('E')
new_weighted.add_node('F')
new_weighted.add_edge('A', 'B', 7)
new_weighted.add_edge('B', 'C', 9)
new_weighted.add_edge('B', 'E', 4)
new_weighted.add_edge('E', 'D', 2)
new_weighted.add_edge('D', 'C', 2)
new_weighted.add_edge('C', 'F', 6)
new_weighted.add_edge('C', 'A', 1)
assert new_weighted.dijkstra('A') == {'B': 7, 'E': 11, 'D': 13, 'C': 15, 'F': 21}
def test_dijkstra_index_error_raises(dijkstra_alg):
    """Ensure an IndexError is raised for a node not in the graph."""
with pytest.raises(IndexError):
dijkstra_alg.dijkstra('7')
def test_bellman_ford_first_test_one():
"""Ensure we get same values as dijkstras."""
from weighted_graph import Weighted
new_weighted = Weighted()
new_weighted.add_node('A')
new_weighted.add_node('B')
new_weighted.add_node('C')
new_weighted.add_node('D')
new_weighted.add_node('E')
new_weighted.add_node('F')
new_weighted.add_edge('A', 'B', 7)
new_weighted.add_edge('B', 'C', 9)
new_weighted.add_edge('B', 'E', 4)
new_weighted.add_edge('E', 'D', 2)
new_weighted.add_edge('D', 'C', 2)
new_weighted.add_edge('C', 'F', 6)
new_weighted.add_edge('C', 'A', 1)
assert new_weighted.bellman_ford('A') == {'A': 0, 'B': 7, 'E': 11, 'D': 13, 'C': 15, 'F': 21}
# {'A': {'B': 7, 'C': 9}, 'B': {'D': 2, 'E': 4}, 'C': {'F': 6}}
def test_bellman_ford_first_test_two(dijkstra_alg):
"""Ensure we get same values as dijkstras."""
assert dijkstra_alg.bellman_ford('0') == {'0': 0, '1': 1, '2': 7, '3': 10, '4': 11, '5': 14}
# {'A': {'B': 7, 'C': 9}, 'B': {'D': 2, 'E': 4}, 'C': {'F': 6}}
def test_bellman_ford_with_negatives_one():
"""Ensure bellman works with negatives."""
from weighted_graph import Weighted
weighted = Weighted()
weighted.add_node('S')
weighted.add_node('E')
weighted.add_node('A')
weighted.add_node('D')
weighted.add_node('B')
weighted.add_node('C')
weighted.add_edge('S', 'E', 8)
weighted.add_edge('S', 'A', 10)
weighted.add_edge('E', 'D', 1)
weighted.add_edge('D', 'A', -4)
weighted.add_edge('D', 'C', -1)
weighted.add_edge('A', 'C', 2)
weighted.add_edge('C', 'B', -2)
weighted.add_edge('B', 'A', 1)
assert weighted.bellman_ford('S') == {'A': 5, 'B': 5, 'C': 7, 'D': 9, 'E': 8, 'S': 0}
def test_bellman_with_negatives_two():
"""Ensure it works with various cases of negatives."""
from weighted_graph import Weighted
weighted = Weighted()
weighted.add_node(0)
weighted.add_node(1)
weighted.add_node(2)
weighted.add_node(3)
weighted.add_node(4)
weighted.add_node(5)
weighted.add_edge(0, 1, 5)
weighted.add_edge(0, 2, 3)
weighted.add_edge(1, 3, 7)
weighted.add_edge(2, 3, -2)
weighted.add_edge(3, 0, 8)
weighted.add_edge(3, 4, 3)
weighted.add_edge(4, 5, 6)
weighted.add_edge(0, 5, 4)
assert weighted.bellman_ford(0) == {0: 0, 1: 5, 2: 3, 3: 1, 4: 4, 5: 4}
|
nilq/baby-python
|
python
|
import os.path
from unittest import TestCase
from pkg_resources import require, DistributionNotFound
from subprocess import call
from sys import platform, executable, exit
from src.info import AppInfo
try:
REQUIRED = open(os.path.join(AppInfo.root_dir, "requirements.txt")).read()
except Exception as e:
raise Exception(
f"Failed to locate requirements file. Maybe it was deleted?\n\n{str(e)}"
)
class Requirements(TestCase):
"""
    Test case, solely here to ensure that all necessary
    dependencies are installed.
"""
def test_req(self):
missing = []
requirements = self.extract_req(REQUIRED)
for _requirement in requirements:
_requirement = str(_requirement).strip()
with self.subTest(requirement=_requirement):
try:
require(_requirement)
except DistributionNotFound:
missing.append(_requirement)
return missing
def install_reqs(self, missing):
acceptable = {"y", "n", "yes", "no"}
answer = input(
"\n\033[96mDo you wish to install the aforementioned missing packages? [y/n]:\033[0m "
)
if answer.lower() in acceptable:
if "y" in answer.lower():
print("\n\n")
for missed in missing:
self.req(missed, acceptable)
print("\n\033[92mSuccessfully installed required dependencies!\033[0m")
else:
print("Exited successfully.")
exit(0)
def req(self, requirement, acceptable, heading=""):
if not heading:
heading = "\033[4m\033[91mNOTE: This is not an optional package."
ans = input(
f'{heading}\033[0m\033[96m\nAre you sure you want to install "{requirement}"? [y/n]:\033[0m '
)
if ans.lower() in acceptable:
if "y" in ans.lower():
call([executable, "-m", "pip", "install", requirement])
print("\n\n")
else:
print("\n")
extra = (
"\033[1m\033[91mThis package is not optional.\033[0m"
+ "\033[1m\033[91m You must install it.\033[0m"
)
self.req(requirement, acceptable, heading=extra)
else:
invalid = (
"\n\033[1m\033[91mInvalid option. "
+ 'Please use only "yes", "no", "y" or "n" to answer.'
)
self.req(requirement, acceptable, heading=invalid)
    def extract_req(self, requirements):
        deps = []
        for requirement in [
            r for r in requirements.split("\n") if r and r != " " and "#" not in r
        ]:
            # Requirement and (optional) environment-marker condition.
            r, _, c = requirement.partition(";")
            sys_platform = ""
            if "sys_platform" in c.lower():
                sys_platform = c.split("sys_platform == ")[1][:-1].split("'")[1]
            if sys_platform and not platform.lower() == sys_platform:
                continue
            deps.append(r)
        return deps
|
nilq/baby-python
|
python
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from typing import Any, Callable, Dict, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DataLoader, Sampler
from flash.core.data.io.input import DataKeys, Input
from flash.core.model import Task
from flash.core.registry import FlashRegistry
from flash.core.utilities.apply_func import get_callable_dict
from flash.core.utilities.types import LOSS_FN_TYPE, LR_SCHEDULER_TYPE, METRICS_TYPE, OPTIMIZER_TYPE
from flash.pointcloud.detection.backbones import POINTCLOUD_OBJECT_DETECTION_BACKBONES
__FILE_EXAMPLE__ = "pointcloud_detection"
class PointCloudObjectDetector(Task):
"""The ``PointCloudObjectDetector`` is a :class:`~flash.core.classification.ClassificationTask` that classifies
pointcloud data.
Args:
num_classes: The number of classes (outputs) for this :class:`~flash.core.model.Task`.
backbone: The backbone name (or a tuple of ``nn.Module``, output size) to use.
backbone_kwargs: Any additional kwargs to pass to the backbone constructor.
loss_fn: The loss function to use. If ``None``, a default will be selected by the
:class:`~flash.core.classification.ClassificationTask` depending on the ``multi_label`` argument.
optimizer: Optimizer to use for training.
lr_scheduler: The LR scheduler to use during training.
metrics: Any metrics to use with this :class:`~flash.core.model.Task`. If ``None``, a default will be selected
by the :class:`~flash.core.classification.ClassificationTask` depending on the ``multi_label`` argument.
learning_rate: The learning rate for the optimizer.
lambda_loss_cls: The value to scale the loss classification.
lambda_loss_bbox: The value to scale the bounding boxes loss.
lambda_loss_dir: The value to scale the bounding boxes direction loss.
"""
backbones: FlashRegistry = POINTCLOUD_OBJECT_DETECTION_BACKBONES
required_extras: str = "pointcloud"
def __init__(
self,
num_classes: int,
backbone: Union[str, Tuple[nn.Module, int]] = "pointpillars_kitti",
backbone_kwargs: Optional[Dict] = None,
loss_fn: LOSS_FN_TYPE = None,
optimizer: OPTIMIZER_TYPE = "Adam",
lr_scheduler: LR_SCHEDULER_TYPE = None,
metrics: METRICS_TYPE = None,
learning_rate: float = 1e-2,
lambda_loss_cls: float = 1.0,
lambda_loss_bbox: float = 1.0,
lambda_loss_dir: float = 1.0,
):
super().__init__(
model=None,
loss_fn=loss_fn,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
metrics=metrics,
learning_rate=learning_rate,
)
self.save_hyperparameters()
if backbone_kwargs is None:
backbone_kwargs = {}
if isinstance(backbone, tuple):
self.backbone, out_features = backbone
else:
self.model, out_features, self.collate_fn = self.backbones.get(backbone)(**backbone_kwargs)
self.backbone = self.model.backbone
self.neck = self.model.neck
self.loss_fn = get_callable_dict(self.model.loss)
if __FILE_EXAMPLE__ not in sys.argv[0]:
self.model.bbox_head.conv_cls = self.head = nn.Conv2d(
out_features, num_classes, kernel_size=(1, 1), stride=(1, 1)
)
    def compute_loss(self, losses: Dict[str, torch.Tensor]) -> torch.Tensor:
losses = losses["loss"]
return (
self.hparams.lambda_loss_cls * losses["loss_cls"]
+ self.hparams.lambda_loss_bbox * losses["loss_bbox"]
+ self.hparams.lambda_loss_dir * losses["loss_dir"]
)
def compute_logs(self, logs: Dict[str, Any], losses: Dict[str, torch.Tensor]):
logs.update({"loss": self.compute_loss(losses)})
return logs
def training_step(self, batch: Any, batch_idx: int) -> Any:
return super().training_step((batch, batch), batch_idx)
def validation_step(self, batch: Any, batch_idx: int) -> Any:
super().validation_step((batch, batch), batch_idx)
def test_step(self, batch: Any, batch_idx: int) -> Any:
super().validation_step((batch, batch), batch_idx)
def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any:
results = self.model(batch)
boxes = self.model.inference_end(results, batch)
return {
DataKeys.INPUT: getattr(batch, "point", None),
DataKeys.PREDS: boxes,
DataKeys.METADATA: [a["name"] for a in batch.attr],
}
def forward(self, x) -> torch.Tensor:
"""First call the backbone, then the model head."""
# hack to enable backbone to work properly.
self.model.device = self.device
return self.model(x)
def _process_dataset(
self,
dataset: Input,
batch_size: int,
num_workers: int,
pin_memory: bool,
collate_fn: Callable,
shuffle: bool = False,
drop_last: bool = True,
sampler: Optional[Sampler] = None,
**kwargs
) -> DataLoader:
dataset.input_transform_fn = self.model.preprocess
dataset.transform_fn = self.model.transform
return DataLoader(
dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
collate_fn=collate_fn,
shuffle=shuffle,
drop_last=drop_last,
sampler=sampler,
)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import discord
import configparser
from libs import raid_combat
# Setup the config and Discord client
config = configparser.RawConfigParser()
config.read('config.conf')
client = discord.Client()
# create the dict of combat managers for each server
combat_managers = {}
@client.event
async def on_ready():
"""
Fires when the account is logged in.
:return:
"""
print('Logged in as {} with the ID {}\n'.format(client.user.name, client.user.id))
# setup a combat manager for each server connected
for server in client.servers:
combat_managers[server.name] = raid_combat.CombatManager(client, server)
@client.async_event
async def on_message(message):
"""
Fires when a message is received.
:param message: Discord message object
:return:
"""
if message.content == '!test':
await combat_managers[message.server.name].start_combat()
@client.async_event
async def on_reaction_add(reaction, user):
# await client.send_message(reaction.message.channel, "{} reacted with {}".format(user.name, reaction.emoji))
if client.user != user:
await combat_managers[reaction.message.server.name].route_action(reaction, user)
await client.remove_reaction(reaction.message, reaction.emoji, user)
if __name__ == '__main__':
token = config.get('Account', 'token')
client.run(token)
|
nilq/baby-python
|
python
|
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
apikey = 'mykey'
secretkey = 'mysecret'
Driver = get_driver(Provider.AURORACOMPUTE)
conn = Driver(key=apikey, secret=secretkey)
|
nilq/baby-python
|
python
|
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import BinaryIO, Iterable, Sequence, Tuple
from bentoml.adapters.file_input import FileInput
from bentoml.adapters.utils import (
check_file_extension,
get_default_accept_image_formats,
)
from bentoml.types import InferenceTask
from bentoml.utils.lazy_loader import LazyLoader
# BentoML optional dependencies, using lazy load to avoid ImportError
imageio = LazyLoader('imageio', globals(), 'imageio')
numpy = LazyLoader('numpy', globals(), 'numpy')
ApiFuncArgs = Tuple[
Sequence['numpy.ndarray'],
]
class ImageInput(FileInput):
"""Transform incoming image data from http request, cli or lambda event into numpy
array.
Handle incoming image data from different sources, transform them into numpy array
and pass down to user defined API functions
    * If you want to operate on the raw image file stream or PIL.Image objects,
      use the low-level alternative FileInput.
Args:
accept_image_formats (string[]): A list of acceptable image formats.
Default value is loaded from bentoml config
'apiserver/default_image_input_accept_file_extensions', which is
set to ['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp'] by default.
List of all supported format can be found here:
https://imageio.readthedocs.io/en/stable/formats.html
pilmode (string): The pilmode to be used for reading image file into numpy
array. Default value is 'RGB'. Find more information at:
https://imageio.readthedocs.io/en/stable/format_png-pil.html
Raises:
ImportError: imageio package is required to use ImageInput
Example:
>>> from bentoml import BentoService, api, artifacts
>>> from bentoml.frameworks.tensorflow import TensorflowSavedModelArtifact
>>> from bentoml.adapters import ImageInput
>>>
>>> CLASS_NAMES = ['cat', 'dog']
>>>
>>> @artifacts([TensorflowSavedModelArtifact('classifier')])
>>> class PetClassification(BentoService):
>>> @api(input=ImageInput())
>>> def predict(self, image_ndarrays):
    >>> results = self.artifacts.classifier.predict(image_ndarrays)
>>> return [CLASS_NAMES[r] for r in results]
"""
def __init__(
self, accept_image_formats=None, pilmode="RGB", **base_kwargs,
):
assert imageio, "`imageio` dependency can be imported"
super().__init__(**base_kwargs)
if 'input_names' in base_kwargs:
raise TypeError(
"ImageInput doesn't take input_names as parameters since bentoml 0.8."
"Update your Service definition "
"or use LegacyImageInput instead(not recommended)."
)
self.pilmode = pilmode
self.accept_image_formats = set(
accept_image_formats or get_default_accept_image_formats()
)
@property
def config(self):
return {
# Converting to list, google.protobuf.Struct does not work with tuple type
"accept_image_formats": list(self.accept_image_formats),
"pilmode": self.pilmode,
}
@property
def request_schema(self):
return {
"image/*": {"schema": {"type": "string", "format": "binary"}},
"multipart/form-data": {
"schema": {
"type": "object",
"properties": {
"image_file": {"type": "string", "format": "binary"}
},
}
},
}
@property
def pip_dependencies(self):
return ["imageio"]
def extract_user_func_args(
self, tasks: Iterable[InferenceTask[BinaryIO]]
) -> ApiFuncArgs:
img_list = []
for task in tasks:
if getattr(task.data, "name", None) and not check_file_extension(
task.data.name, self.accept_image_formats
):
task.discard(
http_status=400,
err_msg=f"Current service only accepts "
f"{self.accept_image_formats} formats",
)
continue
try:
img_array = imageio.imread(task.data, pilmode=self.pilmode)
img_list.append(img_array)
except ValueError as e:
task.discard(http_status=400, err_msg=str(e))
return (img_list,)
|
nilq/baby-python
|
python
|
# app/chats/forms.py
|
nilq/baby-python
|
python
|
from django.views.generic import UpdateView, ListView
from django.http.response import Http404
from django.shortcuts import render
from .models import Image, Categories, Location
# modal window settings
class ModalListView(ListView):
model = Image
template_name = 'welcome.html'
def get_queryset(self):
return Image.objects.all()
class ModalUpdateView(UpdateView):
model = Image
template_name = 'single_img.html'
def dispatch(self, *args, **kwargs):
self.id = kwargs['pk']
return super(ModalUpdateView, self).dispatch(*args, **kwargs)
# Create your views here.
def index(request):
title = 'sue gallery'
images = Image.objects.all()[3:9]
allimages = Image.objects.all()
    image1 = Image.objects.get(id=1)
    image2 = Image.objects.get(id=2)
    image3 = Image.objects.get(id=3)
return render(request, 'welcome.html', {'title':title, 'images':images, 'allimages':allimages,
'image1':image1, 'image2':image2, 'image3':image3})
def gallery_disp(request):
title = 'Gallery Display'
if 'location' in request.GET and request.GET['location']:
search_word = request.GET.get('location')
message = f'Filtered by Location : {search_word}'
location_images = Image.filter_by_location(search_word)
return render(request, 'gallery_display.html', {'message':message, 'images':location_images})
else:
images = Image.objects.all()
message = 'Not Filtered'
categories = Categories.objects.all()
locations = Location.objects.all()
        return render(request, 'gallery_display.html', {'message': message, 'title': title, 'images': images, 'categories': categories, 'locations': locations})
def single_image(request, image_id):
    try:
        single_image = Image.objects.get(id=image_id)
    except Image.DoesNotExist:
        raise Http404('Image Not Available')
return render(request, 'single_img.html', {'single_image': single_image})
def navbar_categories_show(request):
all_items = Categories.objects.all()
    return render(request, 'navbar.html', {'all_items': all_items})
def search_images(request):
title = 'Category search results'
if 'category_image' in request.GET and request.GET['category_image']:
search_term = request.GET.get('category_image')
message = f'{search_term}'
result_images = Image.search_by_category(search_term)
categories = Categories.objects.all()
return render(request, 'search_results.html', {'message':message,'title':title, 'result_images':result_images, 'categories':categories})
else:
message = 'You have not searched for anything'
return render(request, 'search_results.html', {'message':message, 'title':title})
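# The views above assume the Image model exposes filter_by_location and
# search_by_category classmethods. A minimal sketch of what they might look
# like (the field lookups such as location__name and categories__name are
# assumptions, not taken from the actual models module):
#
#   @classmethod
#   def filter_by_location(cls, search_word):
#       return cls.objects.filter(location__name__icontains=search_word)
#
#   @classmethod
#   def search_by_category(cls, search_term):
#       return cls.objects.filter(categories__name__icontains=search_term)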
|
nilq/baby-python
|
python
|
from os import environ
def assert_in(file, files_to_check):
if file not in files_to_check:
raise AssertionError("{} does not exist in the list".format(str(file)))
return True
def assert_in_env(check_list: list):
for item in check_list:
assert_in(item, environ.keys())
return True
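# Example: fail fast at startup if required environment variables are unset
# (the variable names below are illustrative):
#
#   assert_in_env(["DATABASE_URL", "SECRET_KEY"])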
|
nilq/baby-python
|
python
|
from django.contrib import messages
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from misago.admin.views import generic
from misago.users.forms.admin import RankForm
from misago.users.models import Rank
class RankAdmin(generic.AdminBaseMixin):
root_link = 'misago:admin:users:ranks:index'
model = Rank
form = RankForm
templates_dir = 'misago/admin/ranks'
message_404 = _("Requested rank does not exist.")
def update_roles(self, target, roles):
target.roles.clear()
if roles:
target.roles.add(*roles)
def handle_form(self, form, request, target):
super(RankAdmin, self).handle_form(form, request, target)
self.update_roles(target, form.cleaned_data['roles'])
class RanksList(RankAdmin, generic.ListView):
ordering = (('order', None), )
class NewRank(RankAdmin, generic.ModelFormView):
message_submit = _('New rank "%(name)s" has been saved.')
class EditRank(RankAdmin, generic.ModelFormView):
message_submit = _('Rank "%(name)s" has been edited.')
class DeleteRank(RankAdmin, generic.ButtonView):
def check_permissions(self, request, target):
message_format = {'name': target.name}
if target.is_default:
message = _('Rank "%(name)s" is default rank and can\'t be deleted.')
return message % message_format
if target.user_set.exists():
message = _('Rank "%(name)s" is assigned to users and can\'t be deleted.')
return message % message_format
def button_action(self, request, target):
target.delete()
message = _('Rank "%(name)s" has been deleted.')
messages.success(request, message % {'name': target.name})
class MoveDownRank(RankAdmin, generic.ButtonView):
def button_action(self, request, target):
try:
other_target = Rank.objects.filter(order__gt=target.order)
other_target = other_target.earliest('order')
except Rank.DoesNotExist:
other_target = None
if other_target:
other_target.order, target.order = target.order, other_target.order
other_target.save(update_fields=['order'])
target.save(update_fields=['order'])
message = _('Rank "%(name)s" has been moved below "%(other)s".')
targets_names = {'name': target.name, 'other': other_target.name}
messages.success(request, message % targets_names)
class MoveUpRank(RankAdmin, generic.ButtonView):
def button_action(self, request, target):
try:
other_target = Rank.objects.filter(order__lt=target.order)
other_target = other_target.latest('order')
except Rank.DoesNotExist:
other_target = None
if other_target:
other_target.order, target.order = target.order, other_target.order
other_target.save(update_fields=['order'])
target.save(update_fields=['order'])
message = _('Rank "%(name)s" has been moved above "%(other)s".')
targets_names = {'name': target.name, 'other': other_target.name}
messages.success(request, message % targets_names)
class RankUsers(RankAdmin, generic.TargetedView):
def real_dispatch(self, request, target):
redirect_url = reverse('misago:admin:users:accounts:index')
return redirect('%s?rank=%s' % (redirect_url, target.pk))
class DefaultRank(RankAdmin, generic.ButtonView):
def check_permissions(self, request, target):
if target.is_default:
message = _('Rank "%(name)s" is already default.')
return message % {'name': target.name}
def button_action(self, request, target):
Rank.objects.make_rank_default(target)
message = _('Rank "%(name)s" has been made default.')
messages.success(request, message % {'name': target.name})
|
nilq/baby-python
|
python
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
General dataset implementations for TensorFlow
"""
from abc import ABCMeta, abstractmethod
from typing import Any, Callable, Dict, Iterable, List, Tuple
from sparseml.tensorflow_v1.utils import tf_compat
__all__ = [
"create_split_iterators_handle",
"Dataset",
]
def _make_initializable_iterator(dataset: tf_compat.data.Dataset):
"""
Make initializable iterator with different versions of TF
:param dataset: the dataset to create the iterator
:return: an iterator
"""
if hasattr(tf_compat.data, "make_initializable_iterator"):
return tf_compat.data.make_initializable_iterator(dataset)
else:
return dataset.make_initializable_iterator()
def create_split_iterators_handle(split_datasets: Iterable) -> Tuple[Any, Any, List]:
"""
Create an iterators handle for switching between datasets easily while training.
:param split_datasets: the datasets to create the splits and handle for
:return: a tuple containing the handle that should be set with a feed dict,
the iterator used to get the next batch,
and a list of the iterators created from the split_datasets
"""
output_types = None
output_shapes = None
split_iterators = []
for split_dataset in split_datasets:
# get_output_types and shapes are not available in TF 1.13 and prior
# hence the following conditional assignments
output_types = (
tf_compat.data.get_output_types(split_dataset)
if hasattr(tf_compat.data, "get_output_types")
else split_dataset.output_types
)
output_shapes = (
tf_compat.data.get_output_shapes(split_dataset)
if hasattr(tf_compat.data, "get_output_shapes")
else split_dataset.output_shapes
)
split_iterators.append(_make_initializable_iterator(split_dataset))
handle = tf_compat.placeholder(tf_compat.string, shape=[])
iterator = tf_compat.data.Iterator.from_string_handle(
handle, output_types, output_shapes
)
return handle, iterator, split_iterators
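# Illustrative TF1-style usage (a sketch; train_ds and val_ds are placeholder
# tf.data datasets). The active split is chosen per session.run() call by
# feeding the matching string handle:
#
#   handle, iterator, iters = create_split_iterators_handle([train_ds, val_ds])
#   next_batch = iterator.get_next()
#   with tf_compat.Session() as sess:
#       handles = [sess.run(it.string_handle()) for it in iters]
#       sess.run(iters[0].initializer)
#       batch = sess.run(next_batch, feed_dict={handle: handles[0]})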
class Dataset(metaclass=ABCMeta):
"""
Generic dataset implementation for TensorFlow.
Expected to work with the tf.data APIs
"""
@abstractmethod
def __len__(self):
raise NotImplementedError()
def build(
self,
batch_size: int,
repeat_count: int = None,
shuffle_buffer_size: int = None,
prefetch_buffer_size: int = None,
num_parallel_calls: int = None,
) -> tf_compat.data.Dataset:
"""
Create the dataset in the current graph using tf.data APIs
:param batch_size: the batch size to create the dataset for
:param repeat_count: the number of times to repeat the dataset,
if unset or None, will repeat indefinitely
:param shuffle_buffer_size: None if not shuffling,
otherwise the size of the buffer to use for shuffling data
:param prefetch_buffer_size: None if not prefetching,
otherwise the size of the buffer to use for buffering
:param num_parallel_calls: the number of parallel calls to run the
processor function with
:return: a tf.data.Dataset instance
"""
with tf_compat.name_scope(self.name_scope()):
dataset = self.creator()
if shuffle_buffer_size and shuffle_buffer_size > 0:
dataset = dataset.shuffle(
shuffle_buffer_size, reshuffle_each_iteration=True
)
dataset = dataset.map(self.processor, num_parallel_calls=num_parallel_calls)
# Together with shuffling above, putting batch after repeat yields
# batches that straddle epoch boundaries
dataset = dataset.repeat(repeat_count)
dataset = dataset.batch(batch_size)
if prefetch_buffer_size and prefetch_buffer_size > 0:
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
def build_input_fn(
self,
batch_size: int,
repeat_count: int = None,
shuffle_buffer_size: int = None,
prefetch_buffer_size: int = None,
num_parallel_calls: int = None,
) -> Callable[[], Tuple[Dict[str, tf_compat.Tensor], Dict[str, tf_compat.Tensor]]]:
"""
Create an input_fn to be used with Estimators.
Invocation of the input_fn will create the dataset in the current graph
as well as return a tuple containing
(a dictionary of feature tensors, a dictionary of label tensors).
:param batch_size: the batch size to create the dataset for
:param repeat_count: the number of times to repeat the dataset,
if unset or None, will repeat indefinitely
:param shuffle_buffer_size: None if not shuffling,
otherwise the size of the buffer to use for shuffling data
:param prefetch_buffer_size: None if not prefetching,
otherwise the size of the buffer to use for buffering
:param num_parallel_calls: the number of parallel calls to run the
processor function with
:return: a callable representing the input_fn for an Estimator
"""
def input_fn() -> Tuple[
Dict[str, tf_compat.Tensor], Dict[str, tf_compat.Tensor]
]:
dataset = self.build(
batch_size,
repeat_count,
shuffle_buffer_size,
prefetch_buffer_size,
num_parallel_calls,
)
dataset_iter = _make_initializable_iterator(dataset)
tf_compat.add_to_collection(
tf_compat.GraphKeys.TABLE_INITIALIZERS, dataset_iter.initializer
)
iter_batch = dataset_iter.get_next()
features, labels = self.format_iterator_batch(iter_batch)
return features, labels
return input_fn
@abstractmethod
def creator(self) -> tf_compat.data.Dataset:
"""
        Implemented by subclasses to create a tf.data dataset for the given implementation.
:return: a created tf.data dataset
"""
raise NotImplementedError()
@abstractmethod
def processor(self, *args, **kwargs):
"""
        Implemented by subclasses to parallelize and map processing functions
        for loading the data of the dataset into memory.
:param args: generic inputs for processing
:param kwargs: generic inputs for processing
:return: the processed tensors
"""
raise NotImplementedError()
@abstractmethod
def format_iterator_batch(
self, iter_batch: Tuple[tf_compat.Tensor, ...]
) -> Tuple[Dict[str, tf_compat.Tensor], Dict[str, tf_compat.Tensor]]:
"""
        Implemented by subclasses to parse a batch from the dataset iterator
        into a features and labels dict to be used with Estimators
:param iter_batch: the batch ref returned from the iterator
:return: a tuple containing
(a dictionary of feature tensors, a dictionary of label tensors)
"""
raise NotImplementedError()
@abstractmethod
def name_scope(self) -> str:
"""
        Implemented by subclasses to get a name scope for building the dataset
in the graph
:return: the name scope the dataset should be built under in the graph
"""
raise NotImplementedError()
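# A minimal illustrative subclass (not part of sparseml; shown only to make
# the abstract contract above concrete). It wraps in-memory arrays, one
# (feature, label) pair per element:
class _ExampleArrayDataset(Dataset):
    def __init__(self, features, labels):
        self._features = features
        self._labels = labels

    def __len__(self):
        return len(self._features)

    def creator(self) -> tf_compat.data.Dataset:
        return tf_compat.data.Dataset.from_tensor_slices(
            (self._features, self._labels)
        )

    def processor(self, feature, label):
        # no-op preprocessing; real datasets would decode/augment here
        return feature, label

    def format_iterator_batch(self, iter_batch):
        feature, label = iter_batch
        return {"feature": feature}, {"label": label}

    def name_scope(self) -> str:
        return "example_array_dataset"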
|
nilq/baby-python
|
python
|
#!/usr/bin/python
"""
This work targets for emulating fog computing infrastructure and fog service and network evaluation.
Original author Tzu-Chiao Yeh (@tz70s), 2017@National Taiwan University, Dependable Distributed System and Network Lab.
Checkout the License for using, modifying and publishing.
"""
import docker
class Env(object):
"""The declaration of some share variables."""
def __init__(self, node_num):
self.docker_client = self.init_docker_client()
self.cidr_list = self.set_cidr(node_num)
self.used_list = [False] * node_num
def init_docker_client(self):
"""Init docker client for docker daemon api """
client = docker.DockerClient(
base_url='unix://var/run/docker.sock', version='auto')
return client
def set_cidr(self, node_num):
"""Set CIDR for private ip pool assignment, return a list of cidrs"""
# TODO: support this, extend to ip_addr class C
        if node_num > 200:
            print("We don't currently support more than 200 nodes")
exit(1)
sub = node_num
cidr_list = []
for _ in range(node_num):
sub += 1
substr = str(sub)
cidr_list.append('192.168.' + substr + '.0/24')
return cidr_list
def assign_cidr(self):
"""Assign CIDR for an absraction node, return a string from this method"""
for i in range(len(self.used_list)):
if self.used_list[i] is False:
self.used_list[i] = True
return self.cidr_list[i]
return ""
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.4 on 2021-09-09 13:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("accounts", "0005_add_field_last_modified_20210621_1058"),
]
operations = [
migrations.AddField(
model_name="govdepartment",
name="visualisation_url",
field=models.URLField(
blank=True,
default="",
help_text="URL of the visualisation page for this department",
verbose_name="Visualisation URL",
),
),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Shared utility functions for interacting with the data model."""
import logging
logger = logging.getLogger(__name__)
import os
from binascii import hexlify
def generate_random_digest(num_bytes=28, urandom=None, to_hex=None):
"""Generates a random hash and returns the hex digest as a unicode string.
Defaults to sha224::
>>> import hashlib
>>> h = hashlib.sha224()
>>> digest = generate_random_digest()
>>> len(h.hexdigest()) == len(digest)
True
Pass in ``num_bytes`` to specify a different length hash::
>>> h = hashlib.sha512()
>>> digest = generate_random_digest(num_bytes=64)
>>> len(h.hexdigest()) == len(digest)
True
Returns unicode::
>>> type(digest) == type(u'')
True
"""
# Compose.
if urandom is None:
urandom = os.urandom
if to_hex is None:
to_hex = hexlify
# Get random bytes.
r = urandom(num_bytes)
# Return as a unicode string.
return unicode(to_hex(r))
def ensure_unique(self, query, property_, value, max_iter=30, gen_digest=None):
"""Takes a ``candidate`` value for a unique ``property_`` and iterates,
appending an incremented integer until unique.
"""
# Compose.
if gen_digest is None:
gen_digest = generate_random_digest
# Unpack
candidate = value
# Iterate until the slug is unique.
n = 0
n_str = ''
while True:
# Keep trying slug, slug-1, slug-2, etc.
value = u'{0}{1}'.format(candidate, n_str)
existing = None
existing_instances = query.filter(property_==value).all()
for instance in existing_instances:
if instance != self:
existing = instance
break
        if existing and n < max_iter:
            n += 1
            # Try slug-1, slug-2 ... as sequential suffixes. After 20
            # attempts, fall back on appending a random digest rather than
            # a sequential number.
            suffix = str(n) if n < 20 else gen_digest(num_bytes=8)
n_str = u'-{0}'.format(suffix)
continue
break
return value
def get_or_create(cls, **kwargs):
"""Get or create a ``cls`` instance using the ``kwargs`` provided.
>>> from mock import Mock
>>> mock_cls = Mock()
>>> kwargs = dict(foo='bar')
If an instance matches the filter kwargs, return it::
>>> mock_cls.query.filter_by.return_value.first.return_value = 'exist'
>>> get_or_create(mock_cls, **kwargs)
'exist'
>>> mock_cls.query.filter_by.assert_called_with(**kwargs)
Otherwise return a new instance, initialised with the ``kwargs``::
>>> mock_cls = Mock()
>>> mock_cls.return_value = 'new'
>>> mock_cls.query.filter_by.return_value.first.return_value = None
>>> get_or_create(mock_cls, **kwargs)
'new'
>>> mock_cls.assert_called_with(**kwargs)
"""
instance = cls.query.filter_by(**kwargs).first()
if not instance:
instance = cls(**kwargs)
return instance
def get_all_matching(cls, column_name, values):
"""Get all the instances of ``cls`` where the column called ``column_name``
matches one of the ``values`` provided.
Setup::
>>> from mock import Mock
>>> mock_cls = Mock()
>>> mock_cls.query.filter.return_value.all.return_value = ['result']
Queries and returns the results::
>>> get_all_matching(mock_cls, 'a', [1,2,3])
['result']
>>> mock_cls.a.in_.assert_called_with([1,2,3])
>>> mock_cls.query.filter.assert_called_with(mock_cls.a.in_.return_value)
"""
column = getattr(cls, column_name)
query = cls.query.filter(column.in_(values))
return query.all()
def get_object_id(instance):
"""Return an identifier that's unique across database tables, e.g.::
>>> from mock import MagicMock
>>> mock_user = MagicMock()
>>> mock_user.__tablename__ = 'users'
>>> mock_user.id = 1234
>>> get_object_id(mock_user)
u'users#1234'
"""
return u'{0}#{1}'.format(instance.__tablename__, instance.id)
|
nilq/baby-python
|
python
|
# CGI (Common Gateway Interface) is a program that runs on a server (e.g. an
# HTTP server) and provides the interface between the server and client HTML pages.
'''
Start Apache:   sudo apachectl start
Restart Apache: sudo apachectl restart
Stop Apache:    sudo apachectl stop
'''
# http://localhost/cgi-bin/hello.py
# /private/etc/apache2/httpd.conf - Apache server configuration file
# /资源库/WebServer/Documents (i.e. /Library/WebServer/Documents) - Apache document root
# /资源库/WebServer/CGI-Executables (i.e. /Library/WebServer/CGI-Executables) - CGI executables directory
# For the detailed macOS setup, see this Jianshu article: https://www.jianshu.com/p/68b11edc055e
# After finishing the configuration above, you may hit a "You don't have permission to access..." error.
# Fix: change "Require all denied" to "Require all granted" in httpd.conf.
# Examples
'''
http://localhost/cgi-bin/hello.py
http://localhost/cgi-bin/path.py
'''
|
nilq/baby-python
|
python
|
class Solution(object):
def merge(self, nums1, m, nums2, n):
"""
:type nums1: List[int]
:type m: int
:type nums2: List[int]
:type n: int
:rtype: void Do not return anything, modify nums1 in-place instead.
"""
idx = len(nums1) - 1
hi1, hi2 = m - 1, n - 1
while hi1 >= 0 and hi2 >= 0:
if nums1[hi1] > nums2[hi2]:
nums1[idx] = nums1[hi1]
hi1 -= 1
else:
nums1[idx] = nums2[hi2]
hi2 -= 1
idx -= 1
while hi2 >= 0:
nums1[idx] = nums2[hi2]
hi2 -= 1
idx -= 1
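# Example: merging nums1=[1,2,3,0,0,0] (m=3) with nums2=[2,5,6] (n=3)
# rewrites nums1 in place to [1,2,2,3,5,6]:
#
#   nums1 = [1, 2, 3, 0, 0, 0]
#   Solution().merge(nums1, 3, [2, 5, 6], 3)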
|
nilq/baby-python
|
python
|
"""Manages plotting, provides a single interface
for different plots with different backends."""
from __future__ import print_function, absolute_import
import os
import sys
import importlib
import traceback
import numpy
from matplotlib.colors import LinearSegmentedColormap
from vcs.colors import matplotlib2vcs
import acme_diags
from acme_diags.driver.utils.general import get_set_name
def _get_plot_fcn(backend, set_num):
"""Get the actual plot() function based on the backend and set_num."""
try:
if backend in ['matplotlib', 'mpl']:
backend = 'cartopy'
set_num = get_set_name(set_num)
mod_str = 'acme_diags.plot.{}.{}_plot'.format(backend, set_num)
module = importlib.import_module(mod_str)
return module.plot
except ImportError:
print(
'Plotting for set {} with {} is not supported'.format(
set_num, backend))
traceback.print_exc()
def plot(set_num, ref, test, diff, metrics_dict, parameter):
"""Based on set_num and parameter.backend,
call the correct plotting function."""
if hasattr(parameter, 'plot'):
parameter.plot(ref, test, diff, metrics_dict, parameter)
else:
if parameter.backend not in ['vcs', 'cartopy', 'mpl', 'matplotlib']:
raise RuntimeError(
'Invalid backend, choose either "vcs" or "matplotlib"/"mpl"/"cartopy"')
plot_fcn = _get_plot_fcn(parameter.backend, set_num)
try:
plot_fcn(ref, test, diff, metrics_dict, parameter)
except Exception as e:
print('Error while plotting {} with backend {}'.format(set_num, parameter.backend))
traceback.print_exc()
if parameter.debug:
sys.exit()
def get_colormap(colormap, parameters):
"""Get the colormap (string, list for vcs, or mpl colormap obj), which can be
loaded from a local file in the cwd, installed file, or a predefined mpl/vcs one."""
    # unicode doesn't seem to work well with string.endswith()
    colormap = str(colormap)
if not colormap.endswith('.rgb'): # predefined vcs/mpl colormap
return colormap
installed_colormap = os.path.join(acme_diags.INSTALL_PATH, 'colormaps', colormap)
if os.path.exists(colormap):
# colormap is an .rgb in the current directory
pass
elif not os.path.exists(colormap) and os.path.exists(installed_colormap):
# use the colormap from /plot/colormaps
colormap = installed_colormap
elif not os.path.exists(colormap) and not os.path.exists(installed_colormap):
pth = os.path.join(acme_diags.INSTALL_PATH, 'colormaps')
msg = "File {} isn't in the current working directory or installed in {}"
raise IOError(msg.format(colormap, pth))
rgb_arr = numpy.loadtxt(colormap)
rgb_arr = rgb_arr / 255.0
if parameters.backend in ['cartopy', 'mpl', 'matplotlib']:
cmap = LinearSegmentedColormap.from_list(name=colormap, colors=rgb_arr)
return cmap
elif parameters.backend in ['vcs']:
n_levels = 240
cmap = LinearSegmentedColormap.from_list(name=colormap, colors=rgb_arr, N=n_levels)
vcs_cmap = matplotlib2vcs(cmap, vcs_name=colormap)
return vcs_cmap, list(range(n_levels))
else:
raise RuntimeError('Invalid backend: {}'.format(parameters.backend))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
@author: Aditya Intwala
Copyright (C) 2016, Aditya Intwala.
Licensed under the Apache License 2.0. See LICENSE file in the project root for full license information.
"""
import cv2
from Core.Math.Point2 import Point2
class Eraser():
@staticmethod
def ErasePixel(img, pixel):
img.itemset((pixel[0], pixel[1], 0), 255)
img.itemset((pixel[0], pixel[1], 1), 255)
img.itemset((pixel[0], pixel[1], 2), 255)
return img
@staticmethod
def EraseLine(img, p1, p2):
P1 = (int(p1.x), int(p1.y))
P2 = (int(p2.x), int(p2.y))
        Eraser.checkForVicinity(img, p1, p2)
cv2.line(img, P1, P2, (255,255,255),5)
return img
@staticmethod
    def checkForVicinity(img, p1, p2):
        """Count dark pixels around the midpoint of p1-p2 in a thresholded
        copy of the image, as a rough presence check before erasing."""
        img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, img_thresh = cv2.threshold(img_gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        pmid = Point2(int((p1.x + p2.x) / 2), int((p1.y + p2.y) / 2))
pixelPresent = 1
if img_thresh[(pmid.y)+1, (pmid.x)+1] == 0:
pixelPresent +=1
if img_thresh[(pmid.y)-1, (pmid.x)-1] == 0:
pixelPresent +=1
if img_thresh[(pmid.y)+2, (pmid.x)+2] == 0:
pixelPresent +=1
if img_thresh[(pmid.y)-2, (pmid.x)-2] == 0:
pixelPresent +=1
if pixelPresent == 4:
if img_thresh[(pmid.y)+3, (pmid.x)+3] == 0 or img_thresh[(pmid.y)-3, (pmid.x)-3] == 0 :
pixelPresent +=1
return pixelPresent
@staticmethod
def EraseBox(img, p1, p2):
P1 = (p1.x, p1.y)
P2 = (p2.x, p2.y)
cv2.rectangle(img, P1, P2, (255,255,255), -1)
return img
@staticmethod
def EraseCircle(img, p1, radius):
P1 = (int(p1.x), int(p1.y))
Radius = (int(radius))
cv2.circle(img, P1, Radius, (255,255,255),2)
return img
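# Illustrative usage (a sketch; 'drawing.png' is a placeholder file):
#
#   img = cv2.imread('drawing.png')
#   img = Eraser.EraseLine(img, Point2(10, 10), Point2(100, 10))
#   img = Eraser.EraseCircle(img, Point2(60, 60), 15)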
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import sys
def combineJsons(jsonFile1, jsonFile2, outputFile):
    with open(jsonFile1) as f1, open(jsonFile2) as f2:
        dict1 = json.load(f1)
        dict2 = json.load(f2)
    # merge the two dicts; on duplicate keys, values from jsonFile2 win
    dict3 = dict(dict1)
    dict3.update(dict2)
    with open(outputFile, 'w') as output:
        json.dump(dict3, output, indent=2, sort_keys=True)
    return True
if __name__ == '__main__':
    if len(sys.argv) < 4:
        raise Exception("3 arguments needed")
print(combineJsons(sys.argv[1], sys.argv[2], sys.argv[3]))
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-10-25 15:27
from __future__ import unicode_literals
import calaccess_raw.annotations
import calaccess_raw.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('calaccess_raw', '0007_auto_20160831_0132'),
]
operations = [
migrations.AlterField(
model_name='cvr2campaigndisclosurecd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F425', b'Form 425 (Semi-Annual Statement of No Activity (Recipient Committee)): Part 1, Committee Information'), (b'F450', b'Form 450 (Campaign Disclosure Statement, Short Form (Recipient Committee)): Part 3, Committee Information'), (b'F460', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Cover Page, Part 2'), (b'F465', b'Form 465 (Supplemental Independent Expenditure Report): Part 5, Filing Officers')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=23), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=31)], help_text='Name of the source filing form or schedule', max_length=4),
),
migrations.AlterField(
model_name='cvr2socd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F400', b'Form 400 (Statement of Organization (Slate Mailer Organization)): Part 3, Individuals Who Authorize Contents Of Slate Mailers'), (b'F410', b'Form 410 (Statement of Organization (Recipient Committee)): Part 4, Type of Committee')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=38), calaccess_raw.annotations.DocumentCloud(end_page=46, id='2712033-Cal-Format-1-05-02', start_page=45), calaccess_raw.annotations.DocumentCloud(end_page=59, id='2712034-Cal-Format-201', start_page=58)], help_text="Form type of the filing the record is included in. This must equal the form_type of the parent filing's cover (CVR) record.", max_length=4, verbose_name='form type'),
),
migrations.AlterField(
model_name='cvr3verificationinfocd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[('F400', b'Form 400 (Statement of Organization (Slate Mailer Organization)): Part 5, Verification'), ('F401', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Cover Page'), ('F402', b'Form 402 (Statement of Termination (Slate Mailer Organization)): Verification'), ('F410', b'Form 410 (Statement of Organization (Recipient Committee)): Part 3, Verification'), ('F425', b'Form 425 (Semi-Annual Statement of No Activity (Recipient Committee)): Part 3, Verification'), ('F450', b'Form 450 (Campaign Disclosure Statement, Short Form (Recipient Committee)): Part 4, Verification'), ('F460', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Cover Page, Part 1'), ('F461', b'Form 461 (Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)): Part 4, Verification'), ('F465', b'Form 465 (Supplemental Independent Expenditure Report): Part 6, Verification'), ('F511', b'Form 511: Paid Spokesperson Report'), ('F900', b'Form 900: Campaign Disclosure Statement (Public employee retirement board candidate)')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=50), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=64)], help_text='Name of the source filing form or schedule', max_length=4),
),
migrations.AlterField(
model_name='cvrcampaigndisclosurecd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[('F401', b'Form 401: Campaign Disclosure Statement (Slate Mailer Organization)'), ('F425', b'Form 425: Semi-Annual Statement of No Activity (Recipient Committee)'), ('F450', b'Form 450: Campaign Disclosure Statement, Short Form (Recipient Committee)'), ('F460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), ('F461', b'Form 461: Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)'), ('F465', b'Form 465: Supplemental Independent Expenditure Report'), ('F496', b'Form 496: Late Independent Expenditure Report'), ('F497', b'Form 497: Late Contribution Report'), ('F498', b'Form 498: Late Payment Report (Slate Mailer Organization)'), ('F511', b'Form 511: Paid Spokesperson Report'), ('F900', b'Form 900: Campaign Disclosure Statement (Public employee retirement board candidate)')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=18), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=22)], help_text='Name of the source filing form or schedule', max_length=4),
),
migrations.AlterField(
model_name='cvrcampaigndisclosurecd',
name='reportname',
field=calaccess_raw.fields.CharField(blank=True, choices=[('450', b'Form 450: Campaign Disclosure Statement, Short Form (Recipient Committee)'), ('460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), ('461', b'Form 461: Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)')], db_column='REPORTNAME', documentcloud_pages=(calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=15), calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=20), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=19), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=26)), help_text='Attached campaign disclosure statement type. Legal values are 450, 460, and 461.', max_length=3),
),
migrations.AlterField(
model_name='cvrf470cd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F470', b'Form 470: Campaign Disclosure Statement, Short Form (Officeholders and Candidates)')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=22), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=29)], help_text='Type of Filing or Formset. The value of this column will always be equal to F470.', max_length=4),
),
migrations.AlterField(
model_name='cvrsocd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[('F400', b'Form 400: Statement of Organization (Slate Mailer Organization)'), ('F402', b'Form 402: Statement of Termination (Slate Mailer Organization)'), ('F410', b'Form 410: Statement of Organization (Recipient Committee)')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=46), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=59)], help_text='Name of the source filing form or schedule', max_length=4, verbose_name='form type'),
),
migrations.AlterField(
model_name='debtcd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule F, Accrued Expenses (Unpaid Bills)')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=33), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=45)], help_text='Schedule Name/ID: (F - Sched F / Accrued Expenses)', max_length=1),
),
migrations.AlterField(
model_name='efsfilinglogcd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F400', b'Form 400: Statement of Organization (Slate Mailer Organization)'), (b'F401', b'Form 401: Campaign Disclosure Statement (Slate Mailer Organization)'), (b'F402', b'Form 402: Statement of Termination (Slate Mailer Organization)'), (b'F410', b'Form 410: Statement of Organization (Recipient Committee)'), (b'F425', b'Form 425: Semi-Annual Statement of No Activity (Recipient Committee)'), (b'F450', b'Form 450: Campaign Disclosure Statement, Short Form (Recipient Committee)'), (b'F460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), (b'F461', b'Form 461: Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)'), (b'F465', b'Form 465: Supplemental Independent Expenditure Report'), (b'F496', b'Form 496: Late Independent Expenditure Report'), (b'F497', b'Form 497: Late Contribution Report'), (b'F498', b'Form 498: Late Payment Report (Slate Mailer Organization)'), (b'F601', b'Form 601: Lobbying Firm Registration Statement'), (b'F602', b'Form 602: Lobbying Firm Activity Authorization'), (b'F603', b'Form 603: Lobbyist Employer or Lobbying Coalition Registration Statement'), (b'F604', b'Form 604: Lobbyist Certification Statement'), (b'F606', b'Form 606: Notice of Termination'), (b'F607', b'Form 607: Notice of Withdrawal'), (b'F615', b'Form 615: Lobbyist Report'), (b'F625', b'Form 625: Report of Lobbying Firm'), (b'F635', b'Form 635: Report of Lobbyist Employer or Report of Lobbying Coalition'), (b'F645', b'Form 645: Report of Person Spending $5,000 or More'), ('BADFORMAT 253', 'Unknown'), ('form', 'Unknown')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(end_page=8, id='2711624-Overview', start_page=4)], help_text='Name of the source filing form or schedule', max_length=250, verbose_name='form type'),
),
migrations.AlterField(
model_name='expncd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F450P5', b'Form 450 (Campaign Disclosure Statement, Short Form (Recipient Committee)): Part 5, Payments Made'), (b'D', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule D, Summary of Expenditures Supporting / Opposing Other Candidates, Measures and Committees'), (b'E', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule E, Payments Made'), (b'G', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule G, Payments Made by an Agent or Independent Contractor (on Behalf of This Committee)'), (b'F461P5', b'Form 461 (Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)): Part 5, Contributions (Including Loans, Forgiveness of Loans, and LoanGuarantees) and Expenditures Made'), (b'F465P3', b'Form 465 (Supplemental Independent Expenditure Report): Part 3, Independent Expenditures Made'), (b'F900', b'Form 900: Campaign Disclosure Statement (Public employee retirement board candidate)')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=31), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=42)], help_text='Name of the source filing form or schedule', max_length=6),
),
migrations.AlterField(
model_name='f495p2cd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F450', b'Form 450: Campaign Disclosure Statement, Short Form (Recipient Committee)'), (b'F460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), (b'F495', b'Form 495: Supplemental Pre-Election Campaign Statement (Recipient Committee)')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=26), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=35)], help_text='Name of the source filing form to which the Form 495 is attached (must equal Form_Type in CVR record)', max_length=4),
),
migrations.AlterField(
model_name='filerfilingscd',
name='form_id',
field=calaccess_raw.fields.CharField(choices=[(b'F400', b'Form 400: Statement of Organization (Slate Mailer Organization)'), (b'F401', b'Form 401: Campaign Disclosure Statement (Slate Mailer Organization)'), (b'F402', b'Form 402: Statement of Termination (Slate Mailer Organization)'), (b'F405', b'Form 405: Amendment to Campaign Disclosure Statement'), (b'F410', b'Form 410: Statement of Organization (Recipient Committee)'), (b'F415', b'Form 415: Title Unknown'), (b'F416', b'Form 416: Title Unknown'), (b'F419', b'Form 419: Campaign Disclosure Statement, Long Form (Ballot Measure Committee)'), (b'F420', b'Form 420: Campaign Disclosure Statement, Long Form (Recipient Committee)'), (b'F425', b'Form 425: Semi-Annual Statement of No Activity (Recipient Committee)'), (b'F430', b'Form 430: Title Unknown'), (b'F450', b'Form 450: Campaign Disclosure Statement, Short Form (Recipient Committee)'), (b'F460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), (b'F461', b'Form 461: Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)'), (b'F465', b'Form 465: Supplemental Independent Expenditure Report'), (b'F470', b'Form 470: Campaign Disclosure Statement, Short Form (Officeholders and Candidates)'), (b'F490', b'Form 490: Campaign Disclosure Statement, Long Form (Officeholders and Candidates)'), (b'F495', b'Form 495: Supplemental Pre-Election Campaign Statement (Recipient Committee)'), (b'F496', b'Form 496: Late Independent Expenditure Report'), (b'F497', b'Form 497: Late Contribution Report'), (b'F498', b'Form 498: Late Payment Report (Slate Mailer Organization)'), (b'F501', b'Form 501: Candidate Intention Statement'), (b'F502', b'Form 502: Campaign Bank Account Statement'), (b'F511', b'Form 511: Paid Spokesperson Report'), (b'E530', b'Electronic Form 530: Electronic Issue Advocacy Report'), (b'F601', b'Form 601: Lobbying Firm Registration Statement'), (b'F602', b'Form 602: Lobbying Firm Activity Authorization'), (b'F603', b'Form 603: Lobbyist Employer or Lobbying Coalition Registration Statement'), (b'F604', b'Form 604: Lobbyist Certification Statement'), (b'F605', b'Form 605: Amendment to Registration, Lobbying Firm, Lobbyist Employer, Lobbying Coalition'), (b'F606', b'Form 606: Notice of Termination'), (b'F607', b'Form 607: Notice of Withdrawal'), (b'F615', b'Form 615: Lobbyist Report'), (b'F625', b'Form 625: Report of Lobbying Firm'), (b'S630', b'Schedule 630: Payments Made to Lobbying Coalitions (Attachment to Form 625 or 635) '), (b'F635', b'Form 635: Report of Lobbyist Employer or Report of Lobbying Coalition'), (b'S635-C', b'Schedule 635C: Payments Received by Lobbying Coalitions'), (b'S640', b'Schedule 640: Governmental Agencies Reporting (Attachment to Form 635 or Form 645)'), (b'F645', b'Form 645: Report of Person Spending $5,000 or More'), (b'F690', b'Form 690: Amendment to Lobbying Disclosure Report'), (b'F700', b'Form 700: Statement of Economic Interest'), (b'F900', b'Form 900: Campaign Disclosure Statement (Public employee retirement board candidate)'), ('F111', 'Unknown'), ('F410 AT', 'Unknown'), ('F410ATR', 'Unknown'), ('F421', 'Unknown'), ('F440', 'Unknown'), ('F470S', b'Form 470: Campaign Disclosure Statement, Short Form (Officeholders and Candidates)'), ('F480', 'Unknown'), ('F500', 'Unknown'), ('F501502', 'Forms 501 and/or 502 (Candidate Intention and/or Bank Account Statements)'), ('F555', 'Unknown'), ('F666', 'Unknown'), ('F777', 'Unknown'), ('F888', 'Unknown'), ('F999', 'Unknown')], db_column='FORM_ID', db_index=True, 
documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2711614-CalAccessTablesWeb', start_page=65)], help_text='Form identification code', max_length=7, verbose_name='form type'),
),
migrations.AlterField(
model_name='headercd',
name='form_id',
field=calaccess_raw.fields.CharField(choices=[('AF490', 'Form 490, Part A'), ('AP1', 'Allocation Part 1'), ('AP2', 'Allocation Part 2'), ('BF490', 'Form 490, Part B'), ('CF490', 'Form 490, Part C'), ('DF490', 'Form 490, Part D'), ('EF490', 'Form 490, Part E'), ('F450', b'Form 450: Campaign Disclosure Statement, Short Form (Recipient Committee)'), ('F460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), ('F461', b'Form 461: Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)'), ('FF490', 'Form 490, Part F'), ('HF490', 'Form 490, Part H'), ('IF490', 'Form 490, Part I')], db_column='FORM_ID', help_text='Form identification code', max_length=5, verbose_name='Form ID'),
),
migrations.AlterField(
model_name='lccmcd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F615P2', b'Form 615 (Lobbyist Report): Part 2, Campaign Contributions Made or Delivered'), (b'F625P4B', b'Form 625 (Report of Lobbying Firm): Part 4, Campaign Contributions Made'), (b'F635P4B', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 4, Campaign Contributions Made'), (b'F645P3B', b'Form 645 (Report of Person Spending $5,000 or More): Part 3, Campaign Contributions Made')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=64), calaccess_raw.annotations.DocumentCloud(end_page=79, id='2712034-Cal-Format-201', start_page=78)], help_text='Name of the source filing form or schedule', max_length=7),
),
migrations.AlterField(
model_name='lempcd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F601P2A', b'Form 601 (Lobbying Firm Registration Statement): Part 2, Section A, Lobbyist Employers'), (b'F601P2B', b'Form 601 (Lobbying Firm Registration Statement): Part 2, Section B, Subcontracted Clients')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=75), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=90)], help_text='Name of the source filing form or schedule', max_length=7, verbose_name='form type'),
),
migrations.AlterField(
model_name='lexpcd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F615P1', b'Form 615 (Lobbyist Report): Part 1, Activity Expenses Paid, Incurred, Arranged or Provided by the Lobbyist'), (b'F625P3A', b'Form 625 (Report of Lobbying Firm): Part 3, Payments Made In Connection With Lobbying Activities, Section A, Activity Expenses'), (b'F635P3C', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 3, Payments Made in Connection with Lobbying Activities, Section C, Activity Expenses'), (b'F645P2A', b'Form 645 (Report of Person Spending $5,000 or More): Part 2, Payments Made this Period, Section A, Activity Expenses')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=61), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=74)], help_text='Name of the source filing form or schedule', max_length=7),
),
migrations.AlterField(
model_name='loancd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'B1', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule B, Part 1, Loans Received'), (b'B2', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule B, Part 2, Loan Guarantors'), (b'B3', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule B, Part 3, Outstanding Balance'), (b'H', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule H, Loans Made to Others'), (b'H1', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule H, Part 1, Loans Made'), (b'H2', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule H, Part 2, Repayments Rcvd'), (b'H3', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule H, Part 3, Outstanding Loans')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=35), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=47)], help_text='Name of the source filing form or schedule', max_length=2),
),
migrations.AlterField(
model_name='lothcd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F625P3B', b'Form 625 (Report of Lobbying Firm): Part 3, Payments Made In Connection With Lobbying Activities, Section B, Payments Made')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=63), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=77)], help_text='Name of the source filing form or schedule', max_length=7, verbose_name='form type'),
),
migrations.AlterField(
model_name='lpaycd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F625P2', b'Form 625 (Report of Lobbying Firm): Part 2, Payments Received in Connection with Lobbying Activity'), (b'F635P3B', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 3, Payments Made in Connection with Lobbying Activities, Section B, Payments To Lobbying Firms')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=62), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=76)], help_text='Name of the source filing form or schedule', max_length=7, verbose_name='form type'),
),
migrations.AlterField(
model_name='rcptcd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'E530', b'Electronic Form 530: Electronic Issue Advocacy Report'), (b'F900', b'Form 900: Campaign Disclosure Statement (Public employee retirement board candidate)'), (b'F401A', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule A, Payments Received'), (b'A', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule A, Monetary Contributions Received'), (b'A-1', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule A-1, Contributions Transferred to Special Election Commitee'), (b'C', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule C, Non-Monetary Contributions Received'), (b'I', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule I, Miscellanous increases to cash'), (b'F496P3', b'Form 496 (Late Independent Expenditure Report): Part 3, Contributions > $100 Received')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=29), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=37)], help_text='Name of the source filing form or schedule', max_length=9),
),
migrations.AlterField(
model_name='rcptcd',
name='int_rate',
field=calaccess_raw.fields.CharField(blank=True, db_column='INT_RATE', help_text='This field is undocumented. The observed values look like filer_ids taken from section 5, cover page 2 of Form 460 (Related Committees Not Included in this Statement).', max_length=9),
),
migrations.AlterField(
model_name='receivedfilingscd',
name='form_id',
field=calaccess_raw.fields.CharField(blank=True, choices=[(b'F400', b'Form 400: Statement of Organization (Slate Mailer Organization)'), (b'F401', b'Form 401: Campaign Disclosure Statement (Slate Mailer Organization)'), (b'F402', b'Form 402: Statement of Termination (Slate Mailer Organization)'), (b'F410', b'Form 410: Statement of Organization (Recipient Committee)'), (b'F425', b'Form 425: Semi-Annual Statement of No Activity (Recipient Committee)'), (b'F450', b'Form 450: Campaign Disclosure Statement, Short Form (Recipient Committee)'), (b'F460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), (b'F461', b'Form 461: Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)'), (b'F465', b'Form 465: Supplemental Independent Expenditure Report'), (b'F496', b'Form 496: Late Independent Expenditure Report'), (b'F497', b'Form 497: Late Contribution Report'), (b'F498', b'Form 498: Late Payment Report (Slate Mailer Organization)'), (b'F601', b'Form 601: Lobbying Firm Registration Statement'), (b'F602', b'Form 602: Lobbying Firm Activity Authorization'), (b'F603', b'Form 603: Lobbyist Employer or Lobbying Coalition Registration Statement'), (b'F604', b'Form 604: Lobbyist Certification Statement'), (b'F606', b'Form 606: Notice of Termination'), (b'F607', b'Form 607: Notice of Withdrawal'), (b'F615', b'Form 615: Lobbyist Report'), (b'F625', b'Form 625: Report of Lobbying Firm'), (b'F635', b'Form 635: Report of Lobbyist Employer or Report of Lobbying Coalition'), (b'F645', b'Form 645: Report of Person Spending $5,000 or More')], db_column='FORM_ID', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(end_page=8, id='2711624-Overview', start_page=4)], help_text='Form identification code', max_length=7, verbose_name='form identification code'),
),
migrations.AlterField(
model_name='s401cd',
name='form_type',
field=calaccess_raw.fields.CharField(blank=True, choices=[(b'F401B', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule B, Payments Made'), (b'F401B-1', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule B-1, Payments Made by Agent or Independent Contractor'), (b'F401C', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule C, Persons Receiving $1,000 or More'), (b'F401D', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule D, Candidates and Measures Not Listed on Schedule A')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=39), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=51)], help_text='Name of the source filing form or schedule', max_length=7),
),
migrations.AlterField(
model_name='s497cd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F497P1', b'Form 497 (Late Contribution Report): Part 1, Contributions Received'), (b'F497P2', b'Form 497 (Late Contribution Report): Part 2, Contributions Made')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=41), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=54)], help_text='Name of the source filing form or schedule', max_length=6),
),
migrations.AlterField(
model_name='s498cd',
name='form_type',
field=calaccess_raw.fields.CharField(blank=True, choices=[(b'F498-A', b'Form 498 (Late Payment Report (Slate Mailer Organization)): Part A, Late Payments Attributed To'), (b'F498-R', b'Form 498 (Late Payment Report (Slate Mailer Organization)): Part R, Late Payments Received From')], db_column='FORM_TYPE', documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=43), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=56)], help_text='Name of the source filing form or schedule', max_length=9),
),
migrations.AlterField(
model_name='smrycd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F401', b'Form 401: Campaign Disclosure Statement (Slate Mailer Organization)'), (b'F401A', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule A, Payments Received'), (b'F401B', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule B, Payments Made'), (b'F401B-1', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule B-1, Payments Made by Agent or Independent Contractor'), (b'F450', b'Form 450: Campaign Disclosure Statement, Short Form (Recipient Committee)'), (b'F460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), (b'A', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule A, Monetary Contributions Received'), (b'B1', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule B, Part 1, Loans Received'), (b'B2', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule B, Part 2, Loan Guarantors'), (b'B3', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule B, Part 3, Outstanding Balance'), (b'C', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule C, Non-Monetary Contributions Received'), (b'D', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule D, Summary of Expenditures Supporting / Opposing Other Candidates, Measures and Committees'), (b'E', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule E, Payments Made'), (b'F', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule F, Accrued Expenses (Unpaid Bills)'), (b'G', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule G, Payments Made by an Agent or Independent Contractor (on Behalf of This Committee)'), (b'H', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule H, Loans Made to Others'), (b'H1', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule H, Part 1, Loans Made'), (b'H2', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule H, Part 2, Repayments Rcvd'), (b'H3', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule H, Part 3, Outstanding Loans'), (b'I', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule I, Miscellanous increases to cash'), (b'F461', b'Form 461: Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)'), (b'F465', b'Form 465: Supplemental Independent Expenditure Report'), (b'F625', b'Form 625: Report of Lobbying Firm'), (b'F625P2', b'Form 625 (Report of Lobbying Firm): Part 2, Payments Received in Connection with Lobbying Activity'), (b'F625P3A', b'Form 625 (Report of Lobbying Firm): Part 3, Payments Made In Connection With Lobbying Activities, Section A, Activity Expenses'), (b'F625P3B', b'Form 625 (Report of Lobbying Firm): Part 3, Payments Made In Connection With Lobbying Activities, Section B, Payments Made'), (b'F635', b'Form 635: Report of Lobbyist Employer or Report of Lobbying Coalition'), (b'F635P3A', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 3, Payments Made in Connection with Lobbying Activities, Section A, Payments To In-house Employee Lobbyists'), (b'F635P3B', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 3, Payments Made in Connection with Lobbying Activities, Section B, Payments To Lobbying Firms'), (b'F635P3C', b'Form 635 (Report of Lobbyist 
Employer or Report of Lobbying Coalition): Part 3, Payments Made in Connection with Lobbying Activities, Section C, Activity Expenses'), (b'F635P3D', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 3, Payments Made in Connection with Lobbying Activities, Section D, Other Payments to Influence Legislative or Administrative Action'), (b'F635P3E', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 3, Payments Made in Connection with Lobbying Activities, Section E, Payments in Connection with Administrative Testimony in Ratemaking Proceedings Before The California Public Utilities Commission'), (b'S640', b'Schedule 640: Governmental Agencies Reporting (Attachment to Form 635 or Form 645)'), (b'F645', b'Form 645: Report of Person Spending $5,000 or More'), (b'F645P2A', b'Form 645 (Report of Person Spending $5,000 or More): Part 2, Payments Made this Period, Section A, Activity Expenses'), (b'F645P2B', b'Form 645 (Report of Person Spending $5,000 or More): Part 2, Payments Made this Period, Section B, Other Payments to Influence Legislative or Administrative Action'), (b'F645P2C', b'Form 645 (Report of Person Spending $5,000 or More): Part 2, Payments Made this Period, Section C, Payments in Connection with Administrative Testimony in Ratemaking Proceedings Before the California Public Utilities Commission'), (b'F900', b'Form 900: Campaign Disclosure Statement (Public employee retirement board candidate)'), ('401A', calaccess_raw.annotations.FilingFormSection(db_value=b'F401A', documentcloud_id=None, end_page=7, form=calaccess_raw.annotations.FilingForm(b'F401', b'Campaign Disclosure Statement (Slate Mailer Organization)', description=b'Form 401 is filed by slate mailer organizations to disclose payments made and received in connection with producing slate mailers.', documentcloud_id=b'2781366-401-2005-01', group=b'CAMPAIGN'), id=b'A', start_page=5, title=b'Schedule A, Payments Received')), ('401B', calaccess_raw.annotations.FilingFormSection(db_value=b'F401B', documentcloud_id=None, end_page=9, form=calaccess_raw.annotations.FilingForm(b'F401', b'Campaign Disclosure Statement (Slate Mailer Organization)', description=b'Form 401 is filed by slate mailer organizations to disclose payments made and received in connection with producing slate mailers.', documentcloud_id=b'2781366-401-2005-01', group=b'CAMPAIGN'), id=b'B', start_page=8, title=b'Schedule B, Payments Made')), ('401B-1', calaccess_raw.annotations.FilingFormSection(db_value=b'F401B-1', documentcloud_id=None, end_page=None, form=calaccess_raw.annotations.FilingForm(b'F401', b'Campaign Disclosure Statement (Slate Mailer Organization)', description=b'Form 401 is filed by slate mailer organizations to disclose payments made and received in connection with producing slate mailers.', documentcloud_id=b'2781366-401-2005-01', group=b'CAMPAIGN'), id=b'B-1', start_page=10, title=b'Schedule B-1, Payments Made by Agent or Independent Contractor'))], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=86), calaccess_raw.annotations.DocumentCloud(end_page=28, id='2712033-Cal-Format-1-05-02', start_page=27), calaccess_raw.annotations.DocumentCloud(end_page=60, id='2712033-Cal-Format-1-05-02', start_page=59), calaccess_raw.annotations.DocumentCloud(end_page=37, id='2712034-Cal-Format-201', start_page=36), calaccess_raw.annotations.DocumentCloud(end_page=74, id='2712034-Cal-Format-201', start_page=73)], 
help_text='Name of the source filing form or schedule', max_length=8),
),
migrations.AlterField(
model_name='spltcd',
name='pform_type',
field=calaccess_raw.fields.CharField(choices=[(b'A', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule A, Monetary Contributions Received'), (b'B1', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule B, Part 1, Loans Received'), (b'B2', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule B, Part 2, Loan Guarantors'), (b'C', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule C, Non-Monetary Contributions Received'), (b'D', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule D, Summary of Expenditures Supporting / Opposing Other Candidates, Measures and Committees'), (b'F450P5', b'Form 450 (Campaign Disclosure Statement, Short Form (Recipient Committee)): Part 5, Payments Made'), (b'H', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule H, Loans Made to Others')], db_column='PFORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=18)], help_text='Parent Schedule Type', max_length=7),
),
migrations.AlterField(
model_name='textmemocd',
name='form_type',
field=calaccess_raw.fields.CharField(choices=[(b'F401', b'Form 401: Campaign Disclosure Statement (Slate Mailer Organization)'), (b'F405', b'Form 405: Amendment to Campaign Disclosure Statement'), (b'F410', b'Form 410: Statement of Organization (Recipient Committee)'), (b'F425', b'Form 425: Semi-Annual Statement of No Activity (Recipient Committee)'), (b'F450', b'Form 450: Campaign Disclosure Statement, Short Form (Recipient Committee)'), (b'F460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), (b'F461', b'Form 461: Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)'), (b'F465', b'Form 465: Supplemental Independent Expenditure Report'), (b'F496', b'Form 496: Late Independent Expenditure Report'), (b'F497', b'Form 497: Late Contribution Report'), (b'F498', b'Form 498: Late Payment Report (Slate Mailer Organization)'), (b'F601', b'Form 601: Lobbying Firm Registration Statement'), (b'F602', b'Form 602: Lobbying Firm Activity Authorization'), (b'F603', b'Form 603: Lobbyist Employer or Lobbying Coalition Registration Statement'), (b'F604', b'Form 604: Lobbyist Certification Statement'), (b'F605', b'Form 605: Amendment to Registration, Lobbying Firm, Lobbyist Employer, Lobbying Coalition'), (b'F606', b'Form 606: Notice of Termination'), (b'F607', b'Form 607: Notice of Withdrawal'), (b'F615', b'Form 615: Lobbyist Report'), (b'F625', b'Form 625: Report of Lobbying Firm'), (b'F635', b'Form 635: Report of Lobbyist Employer or Report of Lobbying Coalition'), (b'F645', b'Form 645: Report of Person Spending $5,000 or More'), (b'S630', b'Schedule 630: Payments Made to Lobbying Coalitions (Attachment to Form 625 or 635) '), (b'S635-C', b'Schedule 635C: Payments Received by Lobbying Coalitions'), (b'S640', b'Schedule 640: Governmental Agencies Reporting (Attachment to Form 635 or Form 645)'), ('410', b'Form 410: Statement of Organization (Recipient Committee)'), ('460', b'Form 460: Campaign Disclosure Statement (Recipient Committee)'), ('461', b'Form 461: Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)'), ('465', b'Form 465: Supplemental Independent Expenditure Report'), ('496', b'Form 496: Late Independent Expenditure Report'), ('497', b'Form 497: Late Contribution Report'), ('497P1', b'Form 497 (Late Contribution Report): Part 1, Contributions Received'), ('497P2', b'Form 497 (Late Contribution Report): Part 2, Contributions Made'), ('F401A', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule A, Payments Received'), ('F401B', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule B, Payments Made'), ('F401B-1', b'Form 401 (Campaign Disclosure Statement (Slate Mailer Organization)): Schedule B-1, Payments Made by Agent or Independent Contractor'), ('F450P5', b'Form 450 (Campaign Disclosure Statement, Short Form (Recipient Committee)): Part 5, Payments Made'), ('F461P1', b'Form 461 (Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)): Part 1, Name and Address of Filer'), ('F461P2', b'Form 461 (Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)): Part 2, Nature and Interests of Filer'), ('F461P5', b'Form 461 (Campaign Disclosure Statement (Independent Expenditure Committee & Major Donor Committee)): Part 5, Contributions (Including Loans, Forgiveness of Loans, and LoanGuarantees) and Expenditures Made'), ('F465P3', b'Form 465 (Supplemental Independent Expenditure Report): 
Part 3, Independent Expenditures Made'), ('F496P3', b'Form 496 (Late Independent Expenditure Report): Part 3, Contributions > $100 Received'), ('F497P1', b'Form 497 (Late Contribution Report): Part 1, Contributions Received'), ('F497P2', b'Form 497 (Late Contribution Report): Part 2, Contributions Made'), ('F498-A', b'Form 498 (Late Payment Report (Slate Mailer Organization)): Part A, Late Payments Attributed To'), ('F498-R', b'Form 498 (Late Payment Report (Slate Mailer Organization)): Part R, Late Payments Received From'), ('F601P2A', b'Form 601 (Lobbying Firm Registration Statement): Part 2, Section A, Lobbyist Employers'), ('F601P2B', b'Form 601 (Lobbying Firm Registration Statement): Part 2, Section B, Subcontracted Clients'), ('F615P1', b'Form 615 (Lobbyist Report): Part 1, Activity Expenses Paid, Incurred, Arranged or Provided by the Lobbyist'), ('F615P2', b'Form 615 (Lobbyist Report): Part 2, Campaign Contributions Made or Delivered'), ('F625P2', b'Form 625 (Report of Lobbying Firm): Part 2, Payments Received in Connection with Lobbying Activity'), ('F625P3A', b'Form 625 (Report of Lobbying Firm): Part 3, Payments Made In Connection With Lobbying Activities, Section A, Activity Expenses'), ('F625P3B', b'Form 625 (Report of Lobbying Firm): Part 3, Payments Made In Connection With Lobbying Activities, Section B, Payments Made'), ('F625P4B', b'Form 625 (Report of Lobbying Firm): Part 4, Campaign Contributions Made'), ('S635', b'Form 635: Report of Lobbyist Employer or Report of Lobbying Coalition'), ('F635P3B', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 3, Payments Made in Connection with Lobbying Activities, Section B, Payments To Lobbying Firms'), ('F635P3C', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 3, Payments Made in Connection with Lobbying Activities, Section C, Activity Expenses'), ('F635P4B', b'Form 635 (Report of Lobbyist Employer or Report of Lobbying Coalition): Part 4, Campaign Contributions Made'), ('F645P2A', b'Form 645 (Report of Person Spending $5,000 or More): Part 2, Payments Made this Period, Section A, Activity Expenses'), ('F645P3B', b'Form 645 (Report of Person Spending $5,000 or More): Part 3, Campaign Contributions Made'), ('S497', b'Form 497: Late Contribution Report'), ('S635C', b'Schedule 635C: Payments Received by Lobbying Coalitions'), ('A', 'Schedule A of any form (e.g., Forms 401 or 460)'), ('A4', 'Schedule A of any form (e.g., Forms 401 or 460)'), ('A6', 'Schedule A of any form (e.g., Forms 401 or 460)'), ('B', 'Schedule B of any form (e.g., Forms 401 or 460)'), ('B1', 'Schedule B, Part 1 of Forms 401 or 460'), ('B2', 'Schedule B, Part 2 of Forms 401 or 460'), ('B3', 'Schedule B, Part 3 of Forms 401 or 460'), ('C', 'Schedule C of any form (e.g., Forms 401 or F460)'), ('COMMENTS', 'Possibly comments by FPPC for any form?'), ('CVR', 'Cover page for any form (e.g., Forms 460, 461 or 497)'), ('D', 'Schedule D of any form (e.g., Forms 401, 460 or 461)'), ('DEBTF', b'Form 460 (Campaign Disclosure Statement (Recipient Committee)): Schedule F, Accrued Expenses (Unpaid Bills)'), ('E', 'Schedule E of any form (e.g., Forms 460, 461 or 465)'), ('EXPNT', 'Expenditures outlined on any form (e.g. 
Form 460)'), ('F', 'Schedule F of any form (e.g., Form 460)'), ('G', 'Schedule G of any form (e.g., Form 460)'), ('H', 'Schedule H of any form (e.g., Form 460)'), ('H1', 'Schedule H, Part 1 of any form (e.g., Form 460)'), ('H2', 'Schedule H2, Part 2 of any form (e.g., Form 460)'), ('H3', 'Schedule H3, Part 3 of any form (e.g., Form 460)'), ('I', 'Schedule I of any form (e.g., Form 460)'), ('PT5', 'Part 5 of any form (e.g., Form 461'), ('RCPTB1', 'Schedule B, Part 1 of any form (e.g., Form 460'), ('RCPTC', 'Schedule C of any form (e.g., Form 460)'), ('RCPTI', 'Schedule I of any form (e.g., Form 460)'), ('SCH A', 'Schedule A of any form (e.g., Form 460)'), ('SF', 'Schedule F of any form (e.g., Form 460)'), ('SPLT', 'A memo that applies to multiple items?'), ('SMRY', 'Summary section of any form (e.g., Form 460)'), ('SUM', 'Summary section of any form (e.g., Form 460)'), ('SUMMARY', 'Summary section of any form (e.g., Form 460)')], db_column='FORM_TYPE', db_index=True, documentcloud_pages=[calaccess_raw.annotations.DocumentCloud(id='2711616-MapCalFormat2Fields', start_page=90), calaccess_raw.annotations.DocumentCloud(id='2712034-Cal-Format-201', start_page=16), calaccess_raw.annotations.DocumentCloud(id='2712033-Cal-Format-1-05-02', start_page=13)], help_text='Name of the source filing form or schedule', max_length=8, verbose_name='form type'),
),
]
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import struct
import os, sys
import re
import copy
class Matrix:
"""
    Class to read and handle Matrix files
"""
def __init__(self,Path): # Give the Path of the folder containing all the mtrx files
# Read PATH and open file
self.Path = Path
self.fp = None # file variable
for x in os.listdir(Path): # List the folder and look for the _0001.mtrx file
if x[-10:] == "_0001.mtrx":
self.fp = open(self.Path+"/"+x, "rb")
        if self.fp is None:
print("Matrix file not found!")
sys.exit(1)
if self.fp.read(8) != b"ONTMATRX": # header of the file
print("Unknown header! Wrong Matrix file format")
sys.exit(2)
self.version = self.fp.read(4) # should be 0101
self.IDs = {}
self.params = {} # dictionary to list all the parameters
self.images = {} # images[x] are the parameters used during the record for file named x
# Parse the file and read the block
while True: # While not EOF scan files and read block
r = self.read_block()
            if r is False:
break
def read_string(self):
"""
Strings are stored as UTF-16. First 32-bits is the string length
"""
N = struct.unpack("<L", self.fp.read(4))[0] # string length
if N == 0:
return ""
s = self.fp.read(N*2).decode('utf-16')
return s
def plotSTS(self, ID, num=1): # plot STS file called xxx--ID_num.I(V)_mtrx
x, y = self.getSTS(ID, num)
plt.plot(x, y)
plt.show()
def getUpDown(self, X, Y, NPTS):
"""
        Split the data into Up and Down measurements, pad them with NaN if necessary, and return them with the bias axis in increasing order.
        The returned values are X, [Yup, Ydown].
        If the Up or Down data are missing, an empty array is returned in its place.
"""
        if len(Y) < NPTS: # Missing data: pad the single scan up to NPTS points
            Y = np.pad(Y.astype(float), (0, NPTS - len(Y)), 'constant', constant_values=np.nan)
        elif len(Y) > NPTS: # Forward and backward scans
            if len(Y) < 2*NPTS: # Missing data: pad up to 2*NPTS points
                Y = np.pad(Y.astype(float), (0, 2*NPTS - len(Y)), 'constant', constant_values=np.nan)
            if X[NPTS-1] < X[0]: # Down scan recorded first
                return X[NPTS:], [Y[NPTS:], Y[NPTS-1::-1]]
            else:
                return X[:NPTS], [Y[:NPTS], Y[-1:NPTS-1:-1]]
        if X[-1] < X[0]: # Single down scan: the up data are missing
            return X[::-1], [np.empty(NPTS), Y[::-1]]
        return X, [Y, np.empty(NPTS)]
def getSTSData(self, ID, nums=[1]):
        if ID not in self.IDs or len(nums) < 1:
return None
# retrieve the spectroscopy data (V, I and an object IM containing the parameters)
V, I, IM = self.getSTS(ID, nums[0], params=True)
NPTS = int(IM['Spectroscopy']['Device_1_Points']['value'])
hasDI = self.IDs[ID]['hasDI']
# Call the function to split and flip data if it's UP/Down measurements
V, I = self.getUpDown(V, I, NPTS)
for num in nums[1:]: # Skip first num as it's already parsed above
X, Y = self.getUpDown(*self.getSTS(ID, num), NPTS=NPTS)
if not np.array_equal(V, X):
raise Exception("Bias axis differs between measurements?!?")
for i in range(2): # i=0: Up scan, i=1: Down scan
I[i] = np.vstack((I[i], Y[i]))
Im = [np.nan]*2 # Store the mean of I
Ims = [np.nan]*2 # Store StDev of I
for i in range(2): # i=0: Up scan, i=1: Down scan
Im[i] = I[i].mean(axis=0)
Ims[i] = I[i].std(axis=0)
if hasDI:
X, dI = self.getUpDown(*self.getDIDV(ID, nums[0]), NPTS=NPTS)
for num in nums[1:]:
X, Y = self.getUpDown(*self.getDIDV(ID, num), NPTS=NPTS)
if not np.array_equal(V, X):
raise Exception("Bias axis differs between measurements?!?")
for i in range(2): # i=0: Up scan, i=1: Down scan
dI[i] = np.vstack((dI[i], Y[i]))
dIm = [np.nan]*2 # Store the mean of dI/dV
dIms = [np.nan]*2 # Store the StdDev of dI/dV
for i in range(2): # i=0: Up scan, i=1: Down scan
dIm[i] = dI[i].mean(axis=0)
dIms[i] = dI[i].std(axis=0)
return {'nums':nums, 'V':V, 'I':I, 'dI':dI, 'Imean':Im, 'Istd':Ims, 'dImean':dIm, 'dIstd':dIms}
def getDIDV(self, ID, num=1):
"""
The dI/dV measurements are stored the same way as the I(V), but with file extension Aux2(V).
"""
return self.getSTS(ID, num, ext='Aux2')
def getSTSparams(self, ID, num=1, ext='I'):
        if ID not in self.IDs:
            return None
I = u"%s--%i_%i.%s(V)_mtrx"%(self.IDs[ID]['root'], ID, num, ext)
        if I not in self.images:
return None
return self.images[I]
def getSTS(self, ID, num=1, ext='I', params=False):
"""
Get a spectroscopy file xxxx-ID_num.I(V)_mtrx
"""
IM = self.getSTSparams(ID,num,ext)
        if IM is None:
return None
v1 = IM['Spectroscopy']['Device_1_Start']['value'] # Get the start voltage used for the scan
v2 = IM['Spectroscopy']['Device_1_End']['value'] # Get the end voltage for the scan
I = u"%s--%i_%i.%s(V)_mtrx"%(self.IDs[ID]['root'], ID, num, ext)
ImagePath = self.Path+"/"+I
if not os.path.exists(ImagePath):
return None
ff = open(ImagePath, "rb") # read the STS file
if ff.read(8) != b"ONTMATRX":
print("ERROR: Invalid STS format")
sys.exit(1)
if ff.read(4) != b"0101":
print("ERROR: Invalid STS version")
sys.exit(2)
t = ff.read(4) # TLKB header
ff.read(8) # timestamp
        ff.read(8) # Skip 8 bytes (unknown data; usually 00 00 00 00 00 00 00 00)
t = ff.read(4) # CSED header
ss = struct.unpack('<15L', ff.read(60)) # 15 uint32. ss[6] and ss[7] store the size of the points. ([6] is what was planned and [7] what was actually recorded)
# ss[6] should be used to reconstruct the X-axis and ss[7] to read the binary data
if ff.read(4) != b'ATAD':
print("ERROR: Data should be here, but aren't. Please debug script")
sys.exit(3)
ff.read(4)
        data = np.array(struct.unpack("<%il"%(ss[7]), ff.read(ss[7]*4))) # The data are stored as little-endian signed 32-bit integers
        # Reconstruct the x-axis: take the start and end voltage (v1, v2) with the correct number of points, then pad to the data length. The axis is mirrored for forward/backward scans.
X = np.linspace(v1, v2, int(IM['Spectroscopy']['Device_1_Points']['value']))
if len(X) < ss[6]:
X = np.concatenate((X, X[::-1]))
if len(data) < len(X):
data = np.concatenate((data, [np.nan]*(len(X)-len(data))))
if params:
return X, data, IM
return X, data
def read_value(self):
"""
Values are stored with a specific header for each data type
"""
t = self.fp.read(4)
if t == b"BUOD":
# double
v = struct.unpack("<d", self.fp.read(8))[0]
elif t == b"GNOL":
# uint32
v = struct.unpack("<L", self.fp.read(4))[0]
elif t == b"LOOB":
# bool32
v = struct.unpack("<L", self.fp.read(4))[0] > 0
elif t == b"GRTS":
v = self.read_string()
else:
v = t
return v
def getUI(self):
"""
Read an unsigned int from the file
"""
return struct.unpack("<L", self.fp.read(4))[0]
def read_block(self, sub=False):
        indent = self.fp.read(4) # 4 bytes forming the header; these are capital letters A-Z
if len(indent) < 4: # EOF reached?
return False
        bs = struct.unpack("<L", self.fp.read(4))[0] + (0 if sub else 8) # Block size (top-level blocks include 8 extra header bytes)
r = {"ID":indent, "bs":bs} # Store the parameters found in the block
p = self.fp.tell() # store the file position of the block
if indent == b"DOMP": # Block storing parameters changed during an experiment
self.fp.read(12)
inst = self.read_string()
prop = self.read_string()
unit = self.read_string()
self.fp.read(4)
            value = self.read_value()
            r.update({'inst':inst, 'prop':prop, 'unit':unit, 'value':value})
            self.params[inst][prop].update({'unit':unit, 'value':value}) # Update the parameter information stored in self.params
elif indent == b"CORP": # Processor of scanning window. Useless in this script for the moment
self.fp.read(12)
a = self.read_string()
b = self.read_string()
r.update({'a':a, 'b':b})
elif indent == b"FERB": # A file was stored
self.fp.read(12)
a = self.read_string() # Filename
r['filename'] = a
            self.images[a] = copy.deepcopy(self.params) # Store the parameters used to record file a
            # Build a catalogue so we don't have to scan all images later
res = re.search(r'^(.*?)--([0-9]*)_([0-9]*)\.([^_]+)_mtrx$', a)
ID = int(res.group(2))
num = int(res.group(3))
_type = res.group(4)
if not ID in self.IDs:
self.IDs[ID] = {'nums':[], 'root':res.group(1)}
if _type in ["Aux2(V)"]:
self.IDs[ID]['hasDI'] = True
if _type in ["I(V)"]:
self.IDs[ID]['nums'].append(num)
elif indent == b"SPXE": # Initial configuration
self.fp.read(12) # ??? useless 12 bytes
r['LNEG'] = self.read_block(True) # read subblock
r['TSNI'] = self.read_block(True) # read subblock
r['SXNC'] = self.read_block(True) # read subblock
elif indent == b"LNEG":
r.update({'a':self.read_string(), 'b':self.read_string(), 'c':self.read_string()})
elif indent == b"TSNI":
anz = self.getUI()
rr = []
for ai in range(anz):
a = self.read_string()
b = self.read_string()
c = self.read_string()
count = self.getUI()
pa = []
for i in range(count):
x = self.read_string()
y = self.read_string()
pa.append({'a':x, 'b':y})
                rr.append({'a':a, 'b':b, 'c':c, 'content':pa})
            r['content'] = rr # keep the parsed instances (previously built but discarded)
elif indent == b"SXNC":
count = self.getUI()
r['count'] = count
rr = []
for i in range(count):
a = self.read_string()
b = self.read_string()
k = self.getUI()
kk = []
for j in range(k):
x = self.read_string()
y = self.read_string()
kk.append((x, y))
rr.append((a, b, i, kk))
r['content'] = rr
elif indent == b"APEE": # Store the configurations
self.fp.read(12) # ??? useless 12bytes
num = self.getUI() # Number of parameters class
r['num'] = num
for i in range(num):
inst = self.read_string() # Parameter class name
grp = self.getUI() # Number of parameters in this class
kk = {}
for j in range(grp): # Scan for each parameter, value and unit
prop = self.read_string() # parameter name
unit = self.read_string() # parameter unit
self.fp.read(4) # ???
value = self.read_value() # parameter value
kk[prop] = {"unit":unit, "value":value}
r[inst] = kk
            self.params = r # Store this information as initial values for the parameters
# print(self.params['Spectroscopy'])
self.fp.seek(p) # go back to the beginning of the block
        self.fp.read(bs) # go to the next block by skipping the block-size bytes
        return r # return the information collected
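# --- A minimal usage sketch (hedged: the path and IDs below are placeholders; the
# --- folder must contain the *_0001.mtrx file plus the per-curve data files) ---
# m = Matrix("/path/to/mtrx/folder")
# print(sorted(m.IDs))                 # spectroscopy IDs found while parsing
# res = m.getSTSData(1, nums=[1, 2])   # averaged I(V) (and dI/dV if recorded)
# m.plotSTS(1, num=1)                  # quick look at a single curve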
|
nilq/baby-python
|
python
|
import os
import sys
import logging
import json
import typing
import collections
from ConfigSpace.configuration_space import ConfigurationSpace, Configuration
from ConfigSpace.hyperparameters import FloatHyperparameter, IntegerHyperparameter
__author__ = "Marius Lindauer"
__copyright__ = "Copyright 2016, ML4AAD"
__license__ = "3-clause BSD"
TrajEntry = collections.namedtuple(
'TrajEntry', ['train_perf', 'incumbent_id', 'incumbent',
'ta_runs', 'ta_time_used', 'wallclock_time'])
class TrajLogger(object):
"""Writes trajectory logs files and creates output directory if not exists already
Attributes
----------
stats
logger
output_dir
aclib_traj_fn
old_traj_fn
trajectory
"""
def __init__(self, output_dir, stats):
"""Constructor
Parameters
----------
output_dir: str
directory for logging (or None to disable logging)
stats: Stats()
Stats object
"""
self.stats = stats
self.logger = logging.getLogger(self.__module__ + "." + self.__class__.__name__)
self.output_dir = output_dir
if output_dir is None or output_dir == "":
self.output_dir = None
self.logger.info("No output directory for trajectory logging "
"specified -- trajectory will not be logged.")
else:
if not os.path.isdir(output_dir):
try:
os.makedirs(output_dir)
except OSError:
self.logger.debug("Could not make output directory.", exc_info=1)
raise OSError("Could not make output directory: "
"{}.".format(output_dir))
self.old_traj_fn = os.path.join(output_dir, "traj_old.csv")
if not os.path.isfile(self.old_traj_fn):
with open(self.old_traj_fn, "w") as fp:
fp.write(
'"CPU Time Used","Estimated Training Performance",'
'"Wallclock Time","Incumbent ID",'
'"Automatic Configurator (CPU) Time",'
'"Configuration..."\n')
self.aclib_traj_fn = os.path.join(output_dir, "traj_aclib2.json")
self.trajectory = []
def add_entry(self, train_perf: float, incumbent_id: int,
incumbent: Configuration):
"""Adds entries to trajectory files (several formats) with using the
same timestamps for each entry
Parameters
----------
train_perf: float
estimated performance on training (sub)set
incumbent_id: int
id of incumbent
incumbent: Configuration()
current incumbent configuration
"""
ta_runs = self.stats.ta_runs
ta_time_used = self.stats.ta_time_used
wallclock_time = self.stats.get_used_wallclock_time()
self.trajectory.append(TrajEntry(train_perf, incumbent_id, incumbent,
ta_runs, ta_time_used, wallclock_time))
if self.output_dir is not None:
self._add_in_old_format(train_perf, incumbent_id, incumbent,
ta_time_used, wallclock_time)
self._add_in_aclib_format(train_perf, incumbent_id, incumbent,
ta_time_used, wallclock_time)
def _add_in_old_format(self, train_perf: float, incumbent_id: int,
incumbent: Configuration, ta_time_used: float,
wallclock_time: float):
"""Adds entries to old SMAC2-like trajectory file
Parameters
----------
train_perf: float
Estimated performance on training (sub)set
incumbent_id: int
Id of incumbent
incumbent: Configuration()
Current incumbent configuration
ta_time_used: float
CPU time used by the target algorithm
wallclock_time: float
Wallclock time used so far
"""
conf = []
for p in incumbent:
            if incumbent.get(p) is not None:
conf.append("%s='%s'" % (p, repr(incumbent[p])))
with open(self.old_traj_fn, "a") as fp:
fp.write("%f, %f, %f, %d, %f, %s\n" % (
ta_time_used,
train_perf,
wallclock_time,
incumbent_id,
wallclock_time - ta_time_used,
", ".join(conf)
))
def _add_in_aclib_format(self, train_perf: float, incumbent_id: int,
incumbent: Configuration, ta_time_used: float,
wallclock_time: float):
"""Adds entries to AClib2-like trajectory file
Parameters
----------
train_perf: float
Estimated performance on training (sub)set
incumbent_id: int
Id of incumbent
incumbent: Configuration()
Current incumbent configuration
ta_time_used: float
CPU time used by the target algorithm
wallclock_time: float
Wallclock time used so far
"""
conf = []
for p in incumbent:
            if incumbent.get(p) is not None:
conf.append("%s='%s'" % (p, repr(incumbent[p])))
traj_entry = {"cpu_time": ta_time_used,
"total_cpu_time": None, # TODO: fix this
"wallclock_time": wallclock_time,
"evaluations": self.stats.ta_runs,
"cost": train_perf,
"incumbent": conf
}
try:
traj_entry["origin"] = incumbent.origin
except AttributeError:
traj_entry["origin"] = "UNKNOWN"
with open(self.aclib_traj_fn, "a") as fp:
json.dump(traj_entry, fp)
fp.write("\n")
@staticmethod
def read_traj_aclib_format(fn: str, cs: ConfigurationSpace):
"""Reads trajectory from file
Parameters
----------
fn: str
            Filename of a trajectory saved in self._add_in_aclib_format format
cs: ConfigurationSpace
            Configuration Space to translate dict object into Configuration object
Returns
-------
trajectory: list
Each entry in the list is a dictionary of the form
{
"cpu_time": float,
"total_cpu_time": None, # TODO
"wallclock_time": float,
"evaluations": int
"cost": float,
"incumbent": Configuration
}
"""
trajectory = []
with open(fn) as fp:
for line in fp:
entry = json.loads(line)
entry["incumbent"] = TrajLogger._convert_dict_to_config(
entry["incumbent"], cs=cs)
trajectory.append(entry)
return trajectory
@staticmethod
def _convert_dict_to_config(config_list: typing.List[str], cs: ConfigurationSpace):
# CAN BE DONE IN CONFIGSPACE
"""Since we save a configurations in a dictionary str->str we have to
try to figure out the type (int, float, str) of each parameter value
Parameters
----------
config_list: typing.List[str]
Configuration as a list of "str='str'"
cs: ConfigurationSpace
            Configuration Space to translate dict object into Configuration object
"""
config_dict = {}
for param in config_list:
            k, v = param.split("=", 1) # split on the first "=" only, so values containing "=" survive
            v = v.strip("'")
hp = cs.get_hyperparameter(k)
if isinstance(hp, FloatHyperparameter):
v = float(v)
elif isinstance(hp, IntegerHyperparameter):
v = int(v)
config_dict[k] = v
config = Configuration(configuration_space=cs, values=config_dict)
config.origin = "External Trajectory"
return config
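# --- A minimal usage sketch (hedged: `stats`, `incumbent` and `cs` are assumed to come
# --- from a surrounding SMAC run; paths are placeholders) ---
# traj_logger = TrajLogger(output_dir="smac-output", stats=stats)
# traj_logger.add_entry(train_perf=0.42, incumbent_id=1, incumbent=incumbent)
# trajectory = TrajLogger.read_traj_aclib_format("smac-output/traj_aclib2.json", cs)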
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# This is run by the "run-tests" script.
import unittest
import signal
import socket
class TestTimeout(unittest.TestCase):
def test_timeout(self):
port = 12346
s = socket.socket()
s.connect(("0.0.0.0", port))
# Assumes the server has --timeout 1
signal.alarm(3)
# Expect to get EOF before the alarm fires.
ret = s.recv(1024)
signal.alarm(0)
s.close()
self.assertEqual(ret, b'')
if __name__ == '__main__':
unittest.main()
# vim:set ts=4 sw=4 et:
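# Manual repro sketch (hedged: the "run-tests" script normally launches the server;
# the server binary name and flags below are assumptions):
#   $ ./server --port 12346 --timeout 1 &
#   $ python3 test_timeout.py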
|
nilq/baby-python
|
python
|
import numpy as np
matrizquadrada = int(input("Set the matrix size: "))
Geracoes = int(input("Set the number of generations: "))
# Treating 1 as a live cell and 0 as a dead cell.
# The game runs in the terminal.
# Each generation applies the rules of the game, producing a new, updated matrix.
def atualizacao(localCelula, N):
    valorAtualizado = np.zeros([N, N], dtype=int) # Will hold the updated values, according to the rules
    for linha in range(N):
        for celula in range(N):
            # Sum the live neighbours, skipping positions that fall outside the board
            somaVizinhos = 0
            for di in (-1, 0, 1):
                for dj in (-1, 0, 1):
                    if di == 0 and dj == 0:
                        continue # the cell itself is not a neighbour
                    ni, nj = linha + di, celula + dj
                    if 0 <= ni < N and 0 <= nj < N:
                        somaVizinhos += localCelula[ni][nj]
            # Any live cell with fewer than two live neighbours dies of loneliness.
            if localCelula[linha][celula] == 1 and somaVizinhos < 2:
                valorAtualizado[linha][celula] = 0
            # Any live cell with more than three live neighbours dies of overpopulation.
            if localCelula[linha][celula] == 1 and somaVizinhos > 3:
                valorAtualizado[linha][celula] = 0
            # Any dead cell with exactly three live neighbours becomes a live cell.
            if localCelula[linha][celula] == 0 and somaVizinhos == 3:
                valorAtualizado[linha][celula] = 1
            # Any live cell with two or three live neighbours keeps its state for the next generation.
            if localCelula[linha][celula] == 1 and (somaVizinhos == 2 or somaVizinhos == 3):
                valorAtualizado[linha][celula] = 1
    return valorAtualizado
# Start
localCelula = np.random.randint(0, 2, [matrizquadrada, matrizquadrada])
contGeracao = 1
for geracao in range(Geracoes):
    localCelula = atualizacao(localCelula, matrizquadrada)
    print("\n Generation {} \n".format(contGeracao))
    print(localCelula)
    contGeracao += 1
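# Sanity-check sketch (optional): instead of the random board above, seed a "blinker",
# which oscillates with period 2 (assumes matrizquadrada >= 5):
# localCelula = np.zeros((matrizquadrada, matrizquadrada), dtype=int)
# localCelula[2, 1:4] = 1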
|
nilq/baby-python
|
python
|
import itertools
from xjson.xtypes import _Undefined, UNDEFINED, Indef, INDEF, ForeignObject
def reduce_undefined(obj):
if isinstance(obj, dict):
r = {}
for k, v in obj.items():
if v == UNDEFINED:
pass
else:
r[k] = reduce_undefined(v)
return r
    elif isinstance(obj, (tuple, list)):
        r = []
        # Walk the sequence from the end, dropping trailing UNDEFINED entries,
        # then rebuild it in the original order.
        for v in itertools.dropwhile(lambda x: x == UNDEFINED, reversed(obj)):
            r.insert(0, reduce_undefined(v))
        return r
return obj
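# Behavior sketch (UNDEFINED comes from xjson.xtypes):
# reduce_undefined({"a": 1, "b": UNDEFINED}) -> {"a": 1}
# reduce_undefined([1, UNDEFINED, 2, UNDEFINED]) -> [1, UNDEFINED, 2]  (only trailing entries are trimmed)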
|
nilq/baby-python
|
python
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import abc
import time
import datetime
import importlib
from pathlib import Path
from typing import Type, Iterable
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import pandas as pd
from tqdm import tqdm
from loguru import logger
from joblib import Parallel, delayed
from qlib.utils import code_to_fname
class BaseCollector(abc.ABC):
CACHE_FLAG = "CACHED"
NORMAL_FLAG = "NORMAL"
DEFAULT_START_DATETIME_1D = pd.Timestamp("2000-01-01")
DEFAULT_START_DATETIME_1MIN = pd.Timestamp(datetime.datetime.now() - pd.Timedelta(days=5 * 6 - 1)).date()
DEFAULT_END_DATETIME_1D = pd.Timestamp(datetime.datetime.now() + pd.Timedelta(days=1)).date()
DEFAULT_END_DATETIME_1MIN = DEFAULT_END_DATETIME_1D
INTERVAL_1min = "1min"
INTERVAL_1d = "1d"
def __init__(
self,
save_dir: [str, Path],
start=None,
end=None,
interval="1d",
max_workers=1,
max_collector_count=2,
delay=0,
check_data_length: int = None,
limit_nums: int = None,
):
"""
Parameters
----------
save_dir: str
instrument save dir
max_workers: int
            Concurrent number, default is 1; when collecting data, it is recommended that max_workers be set to 1
max_collector_count: int
default 2
delay: float
time.sleep(delay), default 0
interval: str
freq, value from [1min, 1d], default 1d
start: str
start datetime, default None
end: str
end datetime, default None
check_data_length: int
check data length, if not None and greater than 0, each symbol will be considered complete if its data length is greater than or equal to this value, otherwise it will be fetched again, the maximum number of fetches being (max_collector_count). By default None.
limit_nums: int
using for debug, by default None
"""
self.save_dir = Path(save_dir).expanduser().resolve()
self.save_dir.mkdir(parents=True, exist_ok=True)
self.delay = delay
self.max_workers = max_workers
self.max_collector_count = max_collector_count
self.mini_symbol_map = {}
self.interval = interval
self.check_data_length = max(int(check_data_length) if check_data_length is not None else 0, 0)
self.start_datetime = self.normalize_start_datetime(start)
self.end_datetime = self.normalize_end_datetime(end)
self.instrument_list = sorted(set(self.get_instrument_list()))
if limit_nums is not None:
try:
self.instrument_list = self.instrument_list[: int(limit_nums)]
            except Exception as e:
                logger.warning(f"Cannot use limit_nums={limit_nums}, the parameter will be ignored: {e}")
def normalize_start_datetime(self, start_datetime: [str, pd.Timestamp] = None):
return (
pd.Timestamp(str(start_datetime))
if start_datetime
else getattr(self, f"DEFAULT_START_DATETIME_{self.interval.upper()}")
)
def normalize_end_datetime(self, end_datetime: [str, pd.Timestamp] = None):
return (
pd.Timestamp(str(end_datetime))
if end_datetime
else getattr(self, f"DEFAULT_END_DATETIME_{self.interval.upper()}")
)
@abc.abstractmethod
def get_instrument_list(self):
raise NotImplementedError("rewrite get_instrument_list")
@abc.abstractmethod
def normalize_symbol(self, symbol: str):
"""normalize symbol"""
raise NotImplementedError("rewrite normalize_symbol")
@abc.abstractmethod
def get_data(
self, symbol: str, interval: str, start_datetime: pd.Timestamp, end_datetime: pd.Timestamp
) -> pd.DataFrame:
"""get data with symbol
Parameters
----------
symbol: str
interval: str
value from [1min, 1d]
start_datetime: pd.Timestamp
end_datetime: pd.Timestamp
Returns
---------
pd.DataFrame, "symbol" and "date"in pd.columns
"""
raise NotImplementedError("rewrite get_timezone")
def sleep(self):
time.sleep(self.delay)
def _simple_collector(self, symbol: str):
"""
Parameters
----------
symbol: str
"""
self.sleep()
df = self.get_data(symbol, self.interval, self.start_datetime, self.end_datetime)
_result = self.NORMAL_FLAG
if self.check_data_length > 0:
_result = self.cache_small_data(symbol, df)
if _result == self.NORMAL_FLAG:
self.save_instrument(symbol, df)
return _result
def save_instrument(self, symbol, df: pd.DataFrame):
"""save instrument data to file
Parameters
----------
symbol: str
instrument code
df : pd.DataFrame
df.columns must contain "symbol" and "datetime"
"""
if df is None or df.empty:
logger.warning(f"{symbol} is empty")
return
symbol = self.normalize_symbol(symbol)
symbol = code_to_fname(symbol)
instrument_path = self.save_dir.joinpath(f"{symbol}.csv")
df["symbol"] = symbol
if instrument_path.exists():
_old_df = pd.read_csv(instrument_path)
            df = pd.concat([_old_df, df], sort=False) # DataFrame.append is deprecated/removed in newer pandas
df.to_csv(instrument_path, index=False)
def cache_small_data(self, symbol, df):
if len(df) < self.check_data_length:
logger.warning(f"the number of trading days of {symbol} is less than {self.check_data_length}!")
_temp = self.mini_symbol_map.setdefault(symbol, [])
_temp.append(df.copy())
return self.CACHE_FLAG
else:
if symbol in self.mini_symbol_map:
self.mini_symbol_map.pop(symbol)
return self.NORMAL_FLAG
def _collector(self, instrument_list):
error_symbol = []
res = Parallel(n_jobs=self.max_workers)(
delayed(self._simple_collector)(_inst) for _inst in tqdm(instrument_list)
)
for _symbol, _result in zip(instrument_list, res):
if _result != self.NORMAL_FLAG:
error_symbol.append(_symbol)
        logger.info(f"error symbols: {error_symbol}")
logger.info(f"error symbol nums: {len(error_symbol)}")
logger.info(f"current get symbol nums: {len(instrument_list)}")
error_symbol.extend(self.mini_symbol_map.keys())
return sorted(set(error_symbol))
def collector_data(self):
"""collector data"""
logger.info("start collector data......")
instrument_list = self.instrument_list
for i in range(self.max_collector_count):
if not instrument_list:
break
logger.info(f"getting data: {i+1}")
instrument_list = self._collector(instrument_list)
logger.info(f"{i+1} finish.")
for _symbol, _df_list in self.mini_symbol_map.items():
_df = pd.concat(_df_list, sort=False)
if not _df.empty:
self.save_instrument(_symbol, _df.drop_duplicates(["date"]).sort_values(["date"]))
if self.mini_symbol_map:
logger.warning(f"less than {self.check_data_length} instrument list: {list(self.mini_symbol_map.keys())}")
logger.info(f"total {len(self.instrument_list)}, error: {len(set(instrument_list))}")
class BaseNormalize(abc.ABC):
def __init__(self, date_field_name: str = "date", symbol_field_name: str = "symbol", **kwargs):
"""
Parameters
----------
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
"""
self._date_field_name = date_field_name
self._symbol_field_name = symbol_field_name
self.kwargs = kwargs
self._calendar_list = self._get_calendar_list()
@abc.abstractmethod
def normalize(self, df: pd.DataFrame) -> pd.DataFrame:
# normalize
raise NotImplementedError("")
@abc.abstractmethod
def _get_calendar_list(self) -> Iterable[pd.Timestamp]:
"""Get benchmark calendar"""
raise NotImplementedError("")
class Normalize:
def __init__(
self,
source_dir: [str, Path],
target_dir: [str, Path],
normalize_class: Type[BaseNormalize],
max_workers: int = 16,
date_field_name: str = "date",
symbol_field_name: str = "symbol",
**kwargs,
):
"""
Parameters
----------
source_dir: str or Path
The directory where the raw data collected from the Internet is saved
target_dir: str or Path
Directory for normalize data
        normalize_class: Type[BaseNormalize]
normalize class
max_workers: int
Concurrent number, default is 16
date_field_name: str
date field name, default is date
symbol_field_name: str
symbol field name, default is symbol
"""
if not (source_dir and target_dir):
raise ValueError("source_dir and target_dir cannot be None")
self._source_dir = Path(source_dir).expanduser()
self._target_dir = Path(target_dir).expanduser()
self._target_dir.mkdir(parents=True, exist_ok=True)
self._date_field_name = date_field_name
self._symbol_field_name = symbol_field_name
self._end_date = kwargs.get("end_date", None)
self._max_workers = max_workers
self._normalize_obj = normalize_class(
date_field_name=date_field_name, symbol_field_name=symbol_field_name, **kwargs
)
def _executor(self, file_path: Path):
file_path = Path(file_path)
df = pd.read_csv(file_path)
df = self._normalize_obj.normalize(df)
if df is not None and not df.empty:
if self._end_date is not None:
_mask = pd.to_datetime(df[self._date_field_name]) <= pd.Timestamp(self._end_date)
df = df[_mask]
df.to_csv(self._target_dir.joinpath(file_path.name), index=False)
def normalize(self):
logger.info("normalize data......")
with ProcessPoolExecutor(max_workers=self._max_workers) as worker:
file_list = list(self._source_dir.glob("*.csv"))
with tqdm(total=len(file_list)) as p_bar:
for _ in worker.map(self._executor, file_list):
p_bar.update()
class BaseRun(abc.ABC):
def __init__(self, source_dir=None, normalize_dir=None, max_workers=1, interval="1d"):
"""
Parameters
----------
source_dir: str
The directory where the raw data collected from the Internet is saved, default "Path(__file__).parent/source"
normalize_dir: str
Directory for normalize data, default "Path(__file__).parent/normalize"
max_workers: int
            Concurrent number, default is 1; when collecting data, it is recommended that max_workers be set to 1
interval: str
freq, value from [1min, 1d], default 1d
"""
if source_dir is None:
source_dir = Path(self.default_base_dir).joinpath("source")
self.source_dir = Path(source_dir).expanduser().resolve()
self.source_dir.mkdir(parents=True, exist_ok=True)
if normalize_dir is None:
normalize_dir = Path(self.default_base_dir).joinpath("normalize")
self.normalize_dir = Path(normalize_dir).expanduser().resolve()
self.normalize_dir.mkdir(parents=True, exist_ok=True)
self._cur_module = importlib.import_module("collector")
self.max_workers = max_workers
self.interval = interval
@property
@abc.abstractmethod
def collector_class_name(self):
raise NotImplementedError("rewrite collector_class_name")
@property
@abc.abstractmethod
def normalize_class_name(self):
raise NotImplementedError("rewrite normalize_class_name")
@property
@abc.abstractmethod
def default_base_dir(self) -> [Path, str]:
raise NotImplementedError("rewrite default_base_dir")
def download_data(
self,
max_collector_count=2,
delay=0,
start=None,
end=None,
check_data_length: int = None,
limit_nums=None,
**kwargs,
):
"""download data from Internet
Parameters
----------
max_collector_count: int
default 2
delay: float
time.sleep(delay), default 0
start: str
start datetime, default "2000-01-01"
end: str
end datetime, default ``pd.Timestamp(datetime.datetime.now() + pd.Timedelta(days=1))``
check_data_length: int
check data length, if not None and greater than 0, each symbol will be considered complete if its data length is greater than or equal to this value, otherwise it will be fetched again, the maximum number of fetches being (max_collector_count). By default None.
limit_nums: int
using for debug, by default None
Examples
---------
# get daily data
$ python collector.py download_data --source_dir ~/.qlib/instrument_data/source --region CN --start 2020-11-01 --end 2020-11-10 --delay 0.1 --interval 1d
        # get 1min data
        $ python collector.py download_data --source_dir ~/.qlib/instrument_data/source --region CN --start 2020-11-01 --end 2020-11-10 --delay 0.1 --interval 1min
"""
_class = getattr(self._cur_module, self.collector_class_name) # type: Type[BaseCollector]
_class(
self.source_dir,
max_workers=self.max_workers,
max_collector_count=max_collector_count,
delay=delay,
start=start,
end=end,
interval=self.interval,
check_data_length=check_data_length,
limit_nums=limit_nums,
**kwargs,
).collector_data()
def normalize_data(self, date_field_name: str = "date", symbol_field_name: str = "symbol", **kwargs):
"""normalize data
Parameters
----------
date_field_name: str
date field name, default date
symbol_field_name: str
symbol field name, default symbol
Examples
---------
$ python collector.py normalize_data --source_dir ~/.qlib/instrument_data/source --normalize_dir ~/.qlib/instrument_data/normalize --region CN --interval 1d
"""
_class = getattr(self._cur_module, self.normalize_class_name)
yc = Normalize(
source_dir=self.source_dir,
target_dir=self.normalize_dir,
normalize_class=_class,
max_workers=self.max_workers,
date_field_name=date_field_name,
symbol_field_name=symbol_field_name,
**kwargs,
)
yc.normalize()
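# --- A minimal subclass sketch (hedged: names and the DataFrame content are
# --- placeholders; a real collector would fetch data from some provider in get_data) ---
# class MyCollector(BaseCollector):
#     def get_instrument_list(self):
#         return ["AAA", "BBB"]
#     def normalize_symbol(self, symbol):
#         return symbol.upper()
#     def get_data(self, symbol, interval, start_datetime, end_datetime):
#         return pd.DataFrame({"symbol": [symbol], "date": [start_datetime]})
#
# MyCollector("~/.qlib/instrument_data/source", start="2020-11-01", end="2020-11-10").collector_data()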
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
## This file comes from Jennifer Fourquier's excellent ghost-tree project
## Some modifications by Lela Andrews to fit within akutils framework
##
## Ghost-tree is provided under BSD license
##
## Copyright (c) 2015--, ghost-tree development team.
## All rights reserved.
##
"""
This file can be downloaded and used to create a .txt file containing only
the accession numbers from the ghost-tree.nwk that you plan to use for your
analyses.
You must have skbio installed. http://scikit-bio.org/
You will then use "ghost_tree_tips.txt" output file containing the accession
numbers to filter your .biom table so that it contains only the OTUs that
are in the ghost-tree.nwk that you are using.
http://qiime.org/scripts/filter_otus_from_otu_table.html
Use the required arguments and the following two optional arguments:
-e, --otu_ids_to_exclude_fp
(provide the text file containing OTU ids to exclude)
--negate_ids_to_exclude
(this will keep OTUs in otu_ids_to_exclude_fp, rather than discard them)
"""
## Import modules
import os
from skbio import TreeNode
## Read in variables from bash and set tips file name
intree = os.getenv("tree")
randcode = os.getenv("randcode")
tempdir = os.getenv("tempdir")
tipsfile = os.path.join(tempdir, randcode + "_ghost_tree_tips.txt")
## Filter OTU table against supplied tree
ghosttree = TreeNode.read(intree)
with open(tipsfile, "w") as output:
    for node in ghosttree.tips():
        output.write(str(node.name) + "\n")
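# Downstream usage sketch (hedged: -e and --negate_ids_to_exclude come from the QIIME
# script documented above; the .biom paths are placeholders):
#   filter_otus_from_otu_table.py -i otu_table.biom -o filtered_otu_table.biom \
#       -e ghost_tree_tips.txt --negate_ids_to_exclude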
|
nilq/baby-python
|
python
|
from distutils import log
from setuptools import setup
try:
from setuptools.command import egg_info
egg_info.write_toplevel_names
except (ImportError, AttributeError):
pass
else:
def _top_level_package(name):
return name.split('.', 1)[0]
def _hacked_write_toplevel_names(cmd, basename, filename):
pkgs = dict.fromkeys(
[_top_level_package(k)
for k in cmd.distribution.iter_distribution_names()
if _top_level_package(k) != "twisted"
]
)
cmd.write_file("top-level names", filename, '\n'.join(pkgs) + '\n')
egg_info.write_toplevel_names = _hacked_write_toplevel_names
setup(name='dumbserver',
version='1.0',
description='Mock several REST services in one go!',
url='https://github.com/varunmulloli/dumbserver',
download_url = 'https://github.com/varunmulloli/dumbserver/tarball/1.0',
author='Varun Mulloli',
author_email='mulloli@me.com',
license='MIT',
packages=['dumbserver','twisted.plugins'],
install_requires=['PyYAML','treelib','Twisted'],
keywords=['mockserver', 'mock server', 'service', 'http', "REST"],
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Framework :: Twisted",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Quality Assurance",
"Topic :: Software Development :: Testing"
]
)
try:
from twisted.plugin import IPlugin, getPlugins
except ImportError:
pass
else:
list(getPlugins(IPlugin))
|
nilq/baby-python
|
python
|
import sys
from collections import OrderedDict
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as functional
from ptsemseg.models._util import try_index
from modules import IdentityResidualBlock, ABN, GlobalAvgPool2d
from modules.bn import InPlaceABN, InPlaceABNSync
class abn(nn.Module):
def __init__(self,
structure = [3, 3, 6, 3, 1, 1],
norm_act=partial(InPlaceABN, activation="leaky_relu", slope=.01), # PUT THIS INSIDE??????
n_classes=0,
dilation=(1, 2, 4, 4),
in_channels_head = 4096, # THIS AND BELOW ARGS FOR HEAD, VALS TAKEN FROM TEST FILE
out_channels_head = 256,
hidden_channels=256,
dilations_head=(12, 24, 36),
pooling_size=(84, 84)):
"""Wider ResNet with pre-activation (identity mapping) blocks. With the DeeplabV3 head.
This variant uses down-sampling by max-pooling in the first two blocks and by strided convolution in the others.
Parameters
----------
structure : list of int
Number of residual blocks in each of the six modules of the network.
norm_act : callable
Function to create normalization / activation Module.
        n_classes : int
            If not `0`, also include global average pooling and a fully-connected layer with `n_classes` outputs at the end
            of the network.
        dilation : bool or tuple of int
            If truthy, apply dilation to the last three modules and change the down-sampling factor from 32 to 8.
"""
super(abn, self).__init__()
self.structure = structure
self.dilation = dilation
if len(structure) != 6:
raise ValueError("Expected a structure with six values")
# Initial layers
self.mod1 = nn.Sequential(OrderedDict([
("conv1", nn.Conv2d(3, 64, 3, stride=1, padding=1, bias=False))
]))
# Groups of residual blocks
in_channels = 64
channels = [(128, 128), (256, 256), (512, 512), (512, 1024), (512, 1024, 2048), (1024, 2048, 4096)]
for mod_id, num in enumerate(structure):
# Create blocks for module
blocks = []
for block_id in range(num):
if not dilation:
dil = 1
stride = 2 if block_id == 0 and 2 <= mod_id <= 4 else 1
else:
if mod_id == 3:
dil = 2
elif mod_id > 3:
dil = 4
else:
dil = 1
stride = 2 if block_id == 0 and mod_id == 2 else 1
if mod_id == 4:
drop = partial(nn.Dropout2d, p=0.3)
elif mod_id == 5:
drop = partial(nn.Dropout2d, p=0.5)
else:
drop = None
blocks.append((
"block%d" % (block_id + 1),
IdentityResidualBlock(in_channels, channels[mod_id], norm_act=norm_act, stride=stride, dilation=dil,
dropout=drop)
))
# Update channels and p_keep
in_channels = channels[mod_id][-1]
# Create module
if mod_id < 2:
self.add_module("pool%d" % (mod_id + 2), nn.MaxPool2d(3, stride=2, padding=1))
self.add_module("mod%d" % (mod_id + 2), nn.Sequential(OrderedDict(blocks)))
# Pooling and predictor
self.bn_out = norm_act(in_channels)
# if n_classes != 0:
# self.classifier = nn.Sequential(OrderedDict([
# ("avg_pool", GlobalAvgPool2d()),
# ("fc", nn.Linear(in_channels, n_classes))
# ]))
####### HEAD
self.pooling_size = pooling_size
# IN THE PAPER THEY USE 9 INSTEAD OF 3 HERE. BUT IN THE GIT TEST FILE THEY USE 3 AS IT USES THESE IN DEEPLAB.PY. SUGGESTS THEIR BEST RESULT IS WITH 3
self.map_convs = nn.ModuleList([
nn.Conv2d(in_channels_head, hidden_channels, 1, bias=False),
nn.Conv2d(in_channels_head, hidden_channels, 3, bias=False, dilation=dilations_head[0], padding=dilations_head[0]),
nn.Conv2d(in_channels_head, hidden_channels, 3, bias=False, dilation=dilations_head[1], padding=dilations_head[1]),
nn.Conv2d(in_channels_head, hidden_channels, 3, bias=False, dilation=dilations_head[2], padding=dilations_head[2])
])
self.map_bn = norm_act(hidden_channels * 4)
self.global_pooling_conv = nn.Conv2d(in_channels_head, hidden_channels, 1, bias=False)
self.global_pooling_bn = norm_act(hidden_channels)
self.red_conv = nn.Conv2d(hidden_channels * 4, out_channels_head, 1, bias=False)
self.pool_red_conv = nn.Conv2d(hidden_channels, out_channels_head, 1, bias=False)
self.red_bn = norm_act(out_channels_head)
self.reset_parameters(self.map_bn.activation, self.map_bn.slope)
self.cls = nn.Conv2d(out_channels_head, n_classes, 1)
def reset_parameters(self, activation, slope):
gain = nn.init.calculate_gain(activation, slope)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_normal_(m.weight.data, gain)
if hasattr(m, "bias") and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, ABN):
if hasattr(m, "weight") and m.weight is not None:
nn.init.constant_(m.weight, 1)
if hasattr(m, "bias") and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, img):
#print("FORWARD: START")
out_size = img.shape[-2:] # maybe move to init
out = self.mod1(img)
out = self.mod2(self.pool2(out))
out = self.mod3(self.pool3(out))
out = self.mod4(out)
out = self.mod5(out)
out = self.mod6(out)
out = self.mod7(out)
out_body = self.bn_out(out)
#print("FORWARD: END OF BODY")
####### HEAD
# Map convolutions
out = torch.cat([m(out_body) for m in self.map_convs], dim=1)
out = self.map_bn(out)
out = self.red_conv(out)
# Global pooling
pool = self._global_pooling(out_body)
pool = self.global_pooling_conv(pool)
pool = self.global_pooling_bn(pool)
pool = self.pool_red_conv(pool)
if self.training or self.pooling_size is None:
pool = pool.repeat(1, 1, out_body.size(2), out_body.size(3))
out += pool
out = self.red_bn(out)
out = self.cls(out)
        # functional.upsample is deprecated; functional.interpolate is the equivalent call
        out = functional.interpolate(out, size=out_size, mode="bilinear", align_corners=False)
# Note: Mapillary use online bootstrapping for training which is not included here.
#print("FORWARD: END")
return out
def _global_pooling(self, x):
if self.training or self.pooling_size is None:
pool = x.view(x.size(0), x.size(1), -1).mean(dim=-1)
pool = pool.view(x.size(0), x.size(1), 1, 1)
else:
pooling_size = (min(try_index(self.pooling_size, 0), x.shape[2]),
min(try_index(self.pooling_size, 1), x.shape[3]))
padding = (
(pooling_size[1] - 1) // 2,
(pooling_size[1] - 1) // 2 if pooling_size[1] % 2 == 1 else (pooling_size[1] - 1) // 2 + 1,
(pooling_size[0] - 1) // 2,
(pooling_size[0] - 1) // 2 if pooling_size[0] % 2 == 1 else (pooling_size[0] - 1) // 2 + 1
)
pool = functional.avg_pool2d(x, pooling_size, stride=1)
pool = functional.pad(pool, pad=padding, mode="replicate")
return pool
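# --- A minimal forward-pass sketch (hedged: InPlaceABN generally requires CUDA and the
# --- local `modules` package; sizes and class count are placeholders) ---
# model = abn(n_classes=19).cuda().eval()
# with torch.no_grad():
#     logits = model(torch.randn(1, 3, 512, 512, device="cuda"))
# print(logits.shape)  # -> torch.Size([1, 19, 512, 512])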
|
nilq/baby-python
|
python
|
import logging
from flask import Flask
from flask.logging import default_handler
from flask_logging_decorator import trace
app = Flask(__name__)
app.logger.setLevel(logging.WARN)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
default_handler.setFormatter(formatter)
@app.route('/', methods=['GET', 'POST'])
@trace(logging.ERROR)
def index():
return 'hello'
@app.route('/foo', methods=['GET', 'POST'])
@trace()
def foo():
    app.logger.warning('warn') # logger.warn is deprecated in favor of logger.warning
app.logger.error('error')
app.logger.info('info')
app.logger.critical('critical')
app.logger.debug('debug')
return 'hello'
if __name__ == '__main__':
app.run()
|
nilq/baby-python
|
python
|
import logging
import os
import torch
from .data_cls import BertDataBunch
from .learner_cls import BertLearner
from .modeling import (
BertForMultiLabelSequenceClassification,
XLNetForMultiLabelSequenceClassification,
RobertaForMultiLabelSequenceClassification,
DistilBertForMultiLabelSequenceClassification,
CamembertForMultiLabelSequenceClassification,
AlbertForMultiLabelSequenceClassification,
)
from transformers import (
WEIGHTS_NAME,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
CamembertConfig,
CamembertForSequenceClassification,
CamembertTokenizer,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
)
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
MODEL_CLASSES = {
"bert": (
BertConfig,
(BertForSequenceClassification, BertForMultiLabelSequenceClassification),
BertTokenizer,
),
"xlnet": (
XLNetConfig,
(XLNetForSequenceClassification, XLNetForMultiLabelSequenceClassification),
XLNetTokenizer,
),
"xlm": (
XLMConfig,
(XLMForSequenceClassification, XLMForSequenceClassification),
XLMTokenizer,
),
"roberta": (
RobertaConfig,
(RobertaForSequenceClassification, RobertaForMultiLabelSequenceClassification),
RobertaTokenizer,
),
"distilbert": (
DistilBertConfig,
(
DistilBertForSequenceClassification,
DistilBertForMultiLabelSequenceClassification,
),
DistilBertTokenizer,
),
"albert": (
AlbertConfig,
(AlbertForSequenceClassification, AlbertForMultiLabelSequenceClassification),
AlbertTokenizer,
),
"camembert": (
CamembertConfig,
(
CamembertForSequenceClassification,
CamembertForMultiLabelSequenceClassification,
),
CamembertTokenizer,
),
}
class BertClassificationPredictor(object):
def __init__(
self,
model_path,
label_path,
multi_label=False,
model_type="bert",
do_lower_case=True,
):
self.model_path = model_path
self.label_path = label_path
self.multi_label = multi_label
self.model_type = model_type
self.do_lower_case = do_lower_case
self.learner = self.get_learner()
def get_learner(self):
_, _, tokenizer_class = MODEL_CLASSES[self.model_type]
# instantiate the new tokeniser object using the tokeniser name
tokenizer = tokenizer_class.from_pretrained(
self.model_path, do_lower_case=self.do_lower_case
)
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
databunch = BertDataBunch(
self.label_path,
self.label_path,
tokenizer,
train_file=None,
val_file=None,
batch_size_per_gpu=32,
max_seq_length=512,
multi_gpu=False,
multi_label=self.multi_label,
model_type=self.model_type,
no_cache=True,
)
learner = BertLearner.from_pretrained_model(
databunch,
self.model_path,
metrics=[],
device=device,
logger=logging.getLogger(),
output_dir=None,
warmup_steps=0,
multi_gpu=False,
is_fp16=False,
multi_label=self.multi_label,
logging_steps=0,
)
return learner
def predict_batch(self, texts):
return self.learner.predict_batch(texts)
def predict(self, text):
predictions = self.predict_batch([text])[0]
return predictions
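# --- Hedged usage sketch (paths below are hypothetical placeholders) ---
# Load a fine-tuned model directory plus a directory containing the labels
# file, then run single and batch prediction.
if __name__ == "__main__":
    predictor = BertClassificationPredictor(
        model_path="models/bert-finetuned",  # hypothetical path
        label_path="data/labels",            # hypothetical path
        multi_label=False,
        model_type="bert",
    )
    print(predictor.predict("This movie was great."))
    print(predictor.predict_batch(["first text", "second text"]))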
|
nilq/baby-python
|
python
|
"""
handles logging for:
- auth
- contact
- msg
- label
- report
- att
modules
"""
import csv
from datetime import datetime
import os
import shutil
from config import config
log_dir = config.data["log"]["log_dir"]
logfiles = config.data["log"]["logfiles"]
def get_logpath(logtype):
filename = logfiles[logtype]
return os.path.join(log_dir, filename)
def log_data(logtype, data):
    """
    logs data to specified file based on logtype
    """
    for datum in data:
        datum['timestamp'] = timestamp()
    # rebinding the loop variable would not update the list, so build a new one
    data = [stringify_dict(datum) for datum in data]
    write_or_append(logtype, data)
def timestamp():
"""
stringifies current time
"""
return datetime.now().strftime('%Y-%m-%d_%T')
def stringify_dict(datum):
"""
returns log data with all values as strings
"""
return dict((x, str(datum[x])) for x in datum)
def write_or_append(logtype, data):
"""
checks if file exists and appends,
    else creates and writes (starting with headers)
"""
path = get_logpath(logtype)
method = 'w'
if check_file_exists(logtype) and check_schema_match(logtype, data):
# append if log exists and schema matches
method = 'a'
    elif check_file_exists(logtype) and not check_schema_match(logtype, data):
        # log exists, but schema mismatch:
        # back up the old log with a timestamp, then overwrite the main log
        # (str.replace on '.' would corrupt paths containing multiple dots)
        base, ext = os.path.splitext(path)
        shutil.move(path, base + '_' + timestamp() + ext)
logfile = open(path, method)
write_log(logfile, method, data)
logfile.close()
def check_file_exists(logtype):
"""
returns True if path exists
"""
return os.path.isfile(get_logpath(logtype))
def check_schema_match(logtype, data):
    """
    verifies existing file has same headers as data we're appending
    """
    # csv.DictReader needs a file object, not a path string
    with open(get_logpath(logtype)) as logfile:
        fieldnames = csv.DictReader(logfile).fieldnames
    return sorted(data[0].keys()) == sorted(fieldnames or [])
def write_log(logfile, method, data):
"""
writes data to specified file,
appending if it already exists
or writing if it doesn't
"""
logcsv = csv.DictWriter(logfile, list(data[0].keys()))
if method == 'w':
logcsv.writeheader()
for row in data:
logcsv.writerow(row)
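# --- Hedged usage sketch ---
# 'msg' and the record fields below are hypothetical; real logtypes come from
# config.data["log"]["logfiles"]. A timestamp column is added to each row, and
# a schema change triggers a timestamped backup of the old file:
# log_data('msg', [{'id': 1, 'status': 'sent'}, {'id': 2, 'status': 'queued'}])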
|
nilq/baby-python
|
python
|
import io
import json
import unittest
from datetime import datetime
from unittest.mock import Mock
import boto3
from botocore.response import StreamingBody
from botocore.stub import Stubber, ANY
from redis import StrictRedis
from s3_log_shipper.parsers import ParserManager, Parser
from s3_log_shipper.shipper import RedisLogShipper
class RedisLogShipperSpec(unittest.TestCase):
under_test: RedisLogShipper
def setUp(self) -> None:
self.parser_manager = Mock(ParserManager)
client = boto3.client("s3")
self.s3_client: Stubber = Stubber(client)
self.redis_client = Mock(StrictRedis)
self.under_test = RedisLogShipper(
self.redis_client, self.parser_manager, self.s3_client.client
)
def test_ship(self):
parser = Mock(Parser)
timestamp = datetime.now().isoformat()
path_groks = {"timestamp": timestamp, "message": "Hello", "level": "INFO"}
log_groks = {"cluster": "foo12345", "node": "abc1234"}
parser.parse_log.return_value = path_groks
self.parser_manager.get_parser.return_value = parser, log_groks
self.s3_client.add_response(
method="get_object",
service_response={"Body": StreamingBody(io.BytesIO(b"HELLO"), 5)},
expected_params={"Bucket": ANY, "Key": ANY},
)
self.s3_client.activate()
self.under_test.ship("foo", "bar.log")
expected = log_groks.copy()
expected.update(path_groks)
for call in self.redis_client.rpush.call_args_list:
q, data = call[0]
self.assertEqual(q, "logstash")
self.assertEqual(json.loads(data), expected)
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
from pyramid.config import Configurator
from pyramid.static import static_view
import kinto.core
def includeme(config):
config.scan("kinto.tests.core.testapp.views")
# Add an example route with trailing slash (here to serve static files).
# This is only used to test 404 redirection in ``test_views_errors.py``
static = static_view('kinto:tests/core/testapp/static', use_subpath=True)
config.add_route('catchall_static', '/static/*subpath')
config.add_view(static, route_name="catchall_static")
def main(settings=None, config=None, *args, **additional_settings):
if settings is None:
settings = {}
settings.update(additional_settings)
if config is None:
config = Configurator(settings=settings)
kinto.core.initialize(config, version='0.0.1')
config.include(includeme)
app = config.make_wsgi_app()
# Install middleware (no-op if not enabled in setting)
return kinto.core.install_middlewares(app, settings)
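# --- Hedged usage sketch ---
# Serve the test app locally with the standard-library WSGI server
# (host and port are arbitrary):
# if __name__ == '__main__':
#     from wsgiref.simple_server import make_server
#     make_server('127.0.0.1', 8000, main()).serve_forever()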
|
nilq/baby-python
|
python
|
class BaseFilter:
"""
This is the reference implementation for all filters/hooks.
Just passes the data as-is without changing it.
"""
def register(self, kernel, shell):
self.kernel = kernel
self.shell = shell
shell.events.register('post_run_cell', self.post_run_cell)
shell.input_transformers_cleanup.append(self.process_text_input)
# You can also perform more advanced modifications, see:
# https://ipython.readthedocs.io/en/stable/config/inputtransforms.html#ast-transformations
def process_text_input(self, lines):
return lines
    def process_text_output(self, text):
        """
        This is called from the kernel when displaying the results of a command back to the user.
        As the reference implementation, it returns the text unchanged.
        """
        return text
# This is called from the kernel before feeding input into the IPython Shell
def process_run_cell(self, code, options):
"""
Modifies the arguments and code passed to shell.run_cell()
options is a dict like
{
'silent': False,
'store_history': True,
'user_expressions': None
}
that can be modified in place to change behaviour.
Returns: the new code to run
"""
return code
def process_completion(self, code, cursor_pos, completion_data):
"""
This is called from the kernel before returning completion data
completion_data is a dict like
{
'matches' : matches,
'cursor_end' : cursor_pos,
'cursor_start' : cursor_pos - len(txt),
'metadata' : {},
'status' : 'ok'
}
"""
return completion_data
def post_run_cell(self, result):
"""
This is called after executing a cell with the result of that
"""
pass
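# --- Hedged sketch of a concrete filter (hypothetical subclass, not part of
# the original module): records every executed cell while passing the code
# through unchanged. ---
class RecordingFilter(BaseFilter):
    def __init__(self):
        self.history = []

    def process_run_cell(self, code, options):
        self.history.append(code)  # keep a copy; execution is unaffected
        return code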
|
nilq/baby-python
|
python
|
"""
常见的颜色名称
"""
color_dict={
"almond":(239,222,205),
"amaranth":(229,43,80),
"amazon":(59,122,87),
"amber":(255,191,0),
"sae":(255,126,0),
"amethyst":(153,102,204),
"ao":(0,128,0),
"apricot":(251,206,177),
"aqua":(0,255,255),
"aquamarine":(127,255,212),
"arsenic":(59,68,75),
"artichoke":(143,151,121),
"asparagus":(135,169,107),
"auburn":(165,42,42),
"aureolin":(253,238,0),
"aurometalsaurus":(110,127,128),
"avocado":(86,130,3),
"azure":(0,127,255),
"bazaar":(152,119,123),
"beaver":(159,129,112),
"beige":(245,245,220),
"bisque":(255,228,196),
"bistre":(61,43,31),
"bittersweet":(254,111,94),
"black":(0,0,0),
"blond":(250,240,190),
"blue":(0,0,255),
"blueberry":(79,134,247),
"bluebonnet":(28,28,240),
"blush":(222,93,131),
"bole":(121,68,59),
"bone":(227,218,201),
"boysenberry":(135,50,96),
"brass":(181,166,66),
"bronze":(205,127,50),
"brown":(165,42,42),
"bubbles":(231,254,255),
"buff":(240,220,130),
"burgundy":(128,0,32),
"burlywood":(222,184,135),
"byzantine":(189,51,164),
"byzantium":(112,41,99),
"cadet":(83,104,114),
"camel":(193,154,107),
"capri":(0,191,255),
"cardinal":(196,30,58),
"carmine":(150,0,24),
"carnelian":(179,27,27),
"catawba":(112,54,66),
"ceil":(146,161,207),
"celadon":(172,225,175),
"celeste":(178,255,255),
"cerise":(222,49,99),
"cerulean":(0,123,167),
"chamoisee":(160,120,90),
"champagne":(247,231,206),
"charcoal":(54,69,79),
"chartreuse":(127,255,0),
"cherry":(222,49,99),
"chestnut":(149,69,53),
"chocolate":(210,105,30),
"cinereous":(152,129,123),
"cinnabar":(227,66,52),
"cinnamon":(210,105,30),
"citrine":(228,208,10),
"citron":(159,169,31),
"claret":(127,23,52),
"coal":(124,185,232),
"cobalt":(0,71,171),
"coconut":(150,90,62),
"coffee":(111,78,55),
"copper":(184,115,51),
"coquelicot":(255,56,0),
"coral":(255,127,80),
"cordovan":(137,63,69),
"corn":(251,236,93),
"cornsilk":(255,248,220),
"cream":(255,253,208),
"crimson":(220,20,60),
"cyan":(0,255,255),
"daffodil":(255,255,49),
"dandelion":(240,225,48),
"deer":(186,135,89),
"denim":(21,96,189),
"desert":(193,154,107),
"desire":(234,60,83),
"diamond":(185,242,255),
"dirt":(155,118,83),
"drab":(150,113,23),
"ebony":(85,93,80),
"ecru":(194,178,128),
"eggplant":(97,64,81),
"eggshell":(240,234,214),
"emerald":(80,200,120),
"eminence":(108,48,130),
"eucalyptus":(68,215,168),
"fallow":(193,154,107),
"fandango":(181,51,137),
"fawn":(229,170,112),
"feldgrau":(77,93,83),
"feldspar":(253,213,177),
"firebrick":(178,34,34),
"flame":(226,88,34),
"flattery":(107,68,35),
"flavescent":(247,233,142),
"flax":(238,220,130),
"flirt":(162,0,109),
"folly":(255,0,79),
"fuchsia":(255,0,255),
"fulvous":(228,132,0),
"gainsboro":(220,220,220),
"gamboge":(228,155,15),
"ginger":(176,101,0),
"glaucous":(96,130,182),
"glitter":(230,232,250),
"gold":(255,215,0),
"goldenrod":(218,165,32),
"grape":(111,45,168),
"gray":(128,128,128),
"green":(0,255,0),
"grullo":(169,154,134),
"harlequin":(63,255,0),
"heliotrope":(223,115,255),
"honeydew":(240,255,240),
"iceberg":(113,166,210),
"icterine":(252,247,94),
"imperial":(96,47,107),
"inchworm":(178,236,93),
"independence":(76,81,109),
"indigo":(75,0,130),
"iris":(90,79,207),
"irresistible":(179,68,108),
"isabelline":(244,240,236),
"ivory":(255,255,240),
"jade":(0,168,107),
"jasmine":(248,222,126),
"jasper":(215,59,62),
"jet":(52,52,52),
"jonquil":(244,202,22),
"keppel":(58,176,158),
"khaki":(195,176,145),
"kobe":(136,45,23),
"kobi":(231,159,196),
"lava":(207,16,32),
"lavender":(230,230,250),
"lemon":(255,247,0),
"liberty":(84,90,167),
"licorice":(26,17,16),
"lilac":(200,162,200),
"lime":(191,255,0),
"limerick":(157,194,9),
"linen":(250,240,230),
"lion":(193,154,107),
"liver":(103,76,71),
"livid":(102,153,204),
"lumber":(255,228,205),
"lust":(230,32,32),
"magenta":(255,0,255),
"magnolia":(248,244,255),
"mahogany":(192,64,0),
"maize":(251,236,93),
"malachite":(11,218,81),
"manatee":(151,154,170),
"mantis":(116,195,101),
"maroon":(128,0,0),
"mauve":(224,176,255),
"mauvelous":(239,152,170),
"melon":(253,188,180),
"mindaro":(227,249,136),
"mint":(62,180,137),
"moccasin":(250,235,215),
"mulberry":(197,75,140),
"mustard":(255,219,88),
"nyanza":(233,255,219),
"ochre":(204,119,34),
"olive":(128,128,0),
"olivine":(154,185,115),
"onyx":(53,56,57),
"orange":(255,165,0),
"orchid":(218,112,214),
"patriarch":(128,0,128),
"peach":(255,229,180),
"pear":(209,226,49),
"pearl":(234,224,200),
"peridot":(230,226,0),
"periwinkle":(204,204,255),
"persimmon":(236,88,0),
"peru":(205,133,63),
"phlox":(223,0,255),
"pink":(255,192,203),
"pistachio":(147,197,114),
"platinum":(229,228,226),
"plum":(221,160,221),
"popstar":(190,79,98),
"prune":(112,28,28),
"puce":(204,136,153),
"pumpkin":(255,117,24),
"purple":(128,0,128),
"purpureus":(154,78,174),
"quartz":(81,72,79),
"rackley":(93,138,168),
"rajah":(251,171,96),
"raspberry":(227,11,93),
"razzmatazz":(227,37,107),
"red":(255,0,0),
"redwood":(164,90,82),
"regalia":(82,45,128),
"rhythm":(119,118,150),
"rose":(255,0,127),
"rosewood":(101,0,11),
"ruber":(206,70,118),
"ruby":(224,17,95),
"ruddy":(255,0,40),
"rufous":(168,28,7),
"russet":(128,70,27),
"rust":(183,65,14),
"saffron":(244,196,48),
"sage":(188,184,138),
"salmon":(250,128,114),
"sand":(194,178,128),
"sandstorm":(236,213,64),
"sangria":(146,0,10),
"sapphire":(15,82,186),
"scarlet":(255,36,0),
"seashell":(255,245,238),
"sepia":(112,66,20),
"shadow":(138,121,93),
"shampoo":(255,207,241),
"sienna":(136,45,23),
"silver":(192,192,192),
"sinopia":(203,65,11),
"skobeloff":(0,116,116),
"smalt":(0,51,153),
"smitten":(200,65,134),
"smoke":(115,130,118),
"snow":(255,250,250),
"soap":(206,200,239),
"stizza":(153,0,0),
"stormcloud":(79,102,106),
"straw":(228,217,111),
"strawberry":(252,90,141),
"sunglow":(255,204,51),
"sunray":(227,171,87),
"sunset":(250,214,165),
"tan":(210,180,140),
"tangelo":(249,77,0),
"tangerine":(242,133,0),
"taupe":(72,60,50),
"teal":(0,128,128),
"telemagenta":(207,52,118),
"thistle":(216,191,216),
"timberwolf":(219,215,210),
"tomato":(255,99,71),
"toolbox":(116,108,192),
"topaz":(255,200,124),
"tulip":(255,135,141),
"tumbleweed":(222,170,136),
"turquoise":(64,224,208),
"tuscan":(250,214,165),
"tuscany":(192,153,153),
"ube":(136,120,195),
"ultramarine":(18,10,143),
"umber":(99,81,71),
"urobilin":(225,173,33),
"vanilla":(243,229,171),
"verdigris":(67,179,174),
"vermilion":(227,66,52),
"veronica":(160,32,240),
"violet":(143,0,255),
"viridian":(64,130,109),
"waterspout":(164,244,249),
"wenge":(100,84,82),
"wheat":(245,222,179),
"white":(255,255,255),
"wine":(114,47,55),
"wisteria":(201,160,220),
"xanadu":(115,134,120),
"yellow":(255,255,0),
"zaffre":(0,20,168),
"light_blue":(173,216,230),
"light_brown":(181,101,29),
"light_cyan":(224,255,255),
"light_gray":(211,211,211),
"light_green":(144,238,144),
"light_pink":(255,182,193),
"light_yellow":(255,255,224),
}
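# --- Hedged helper sketch (not part of the original module) ---
# Look up a named color and format it as a CSS-style hex string.
def to_hex(name):
    r, g, b = color_dict[name]
    return "#{:02x}{:02x}{:02x}".format(r, g, b)

# Example: to_hex("teal") == "#008080"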
|
nilq/baby-python
|
python
|
import argparse, operator
from collections import defaultdict
from gpToDict import gpToDict, makeEntities
from utility import readFromFile
def run(target):
fileType = target.split('.')[-1]
if fileType == 'data':
entities = makeEntities(gpToDict(target)[0])
elif fileType == 'json':
entities = makeEntities(readFromFile(target))
else:
raise NotImplementedError
turretTargets = ['radiusOnDelim', 'radiusOnMax', 'radiusOnZero', 'delim', 'idealRadius', 'minRadius']
artilleryTargets = ['taperDist']
radiusShips = defaultdict(list)
for shipName, shipData in entities['Ship'].items():
componentSet = set()
upgrades = shipData['ShipUpgradeInfo']
for name, data in upgrades.items():
if type(data) == dict:
components = data['components']
if 'artillery' in components:
tgtComponents = components['artillery']
#print(name, components['artillery'])
componentSet |= set(tgtComponents)
#print(shipName, componentSet)
#data = {'delim': set(), 'max': set(), 'zero': set()}
data = defaultdict(set)
for artilleryName in componentSet:
artillery = shipData[artilleryName]
for pTurret, pTurretData in artillery.items():
if type(pTurretData) == dict and 'typeinfo' in pTurretData:
typeinfo = pTurretData['typeinfo']
if typeinfo['species'] == 'Main' and typeinfo['type'] == 'Gun':
for target in turretTargets:
data[target].add(pTurretData[target])
for target in artilleryTargets:
data[target].add(artillery[target])
#print(data)
try:
dataTuple = tuple([data[target].pop() for target in (turretTargets + artilleryTargets)])
radiusShips[dataTuple].append(shipName)
    except KeyError:
        # an expected key was missing or its set was empty; skip this ship
        pass
sortedKeys = list(radiusShips.keys())
sortedKeys.sort(key=operator.itemgetter(slice(0, -1)))
for disp in sortedKeys:
ships = radiusShips[disp]
outstr = ''
for i, items in enumerate(turretTargets):
outstr = F'{outstr}{items}: {disp[i]} '
tLen = len(turretTargets)
for i, items in enumerate(artilleryTargets):
outstr = F'{outstr}{items}: {disp[i + tLen]} '
print(outstr)
print()
temp = ''
for i, ship in enumerate(ships):
temp = F'{temp}{ship} '
        if i % 3 == 2:
print(temp)
temp = ''
if temp != '':
print(temp)
print()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("inDirectory", type=str, help="Input directory")
#parser.add_argument("outDirectory", type=str, help="Output directory")
#parser.add_argument("-o", "--output", type=str, help="Output file name")
args = parser.parse_args()
run(args.inDirectory)
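# --- Hedged usage note ---
# Run from the command line with a GameParams dump; per run(), both .data and
# .json inputs are supported (the script filename is hypothetical):
#   python dispersion_report.py GameParams.data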
|
nilq/baby-python
|
python
|
from modules.discriminator import MultiScaleDiscriminator, RandomWindowDiscriminator
from modules.generator import Aligner, Decoder, Encoder
from modules.mel import MelSpectrogram
|
nilq/baby-python
|
python
|
from django_roa.remoteauth.models import User
from django.contrib.auth.backends import ModelBackend
class RemoteUserModelBackend(ModelBackend):
"""
    Authenticates against django_roa.remoteauth.models.User.
"""
def authenticate(self, username=None, password=None, **kwargs):
try:
user = User.objects.get(username=username)
if user.check_password(password):
return user
except User.DoesNotExist:
return None
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
        if not hasattr(user_obj, '_group_perm_cache'):
            # TODO: improve performance
            permissions = [u"%s.%s" % (p.content_type.app_label, p.codename)
                           for group in user_obj.groups.all()
                           for p in group.permissions.all()]
            user_obj._group_perm_cache = permissions
return user_obj._group_perm_cache
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
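# --- Hedged configuration note (the module path below is an assumption) ---
# To activate this backend, reference it from Django settings, e.g.:
# AUTHENTICATION_BACKENDS = (
#     'django_roa.remoteauth.backends.RemoteUserModelBackend',
# )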
|
nilq/baby-python
|
python
|
"""
Mountain Car environment adapted from OpenAI gym [1].
* default reward is 0 (instead of -1)
* reward in goal state is 1 (instead of 0)
* also implemented as a generative model (in addition to an online model)
* render function follows the rlberry rendering interface.
[1] https://github.com/openai/gym/blob/master/gym/envs/
classic_control/mountain_car.py
"""
import math
import numpy as np
import rlberry.spaces as spaces
from rlberry.envs.interface import Model
from rlberry.rendering import Scene, GeometricPrimitive, RenderInterface2D
class MountainCar(RenderInterface2D, Model):
"""
The agent (a car) is started at the bottom of a valley. For any given
state the agent may choose to accelerate to the left, right or cease
any acceleration.
Notes
-----
Source:
The environment appeared first in Andrew Moore's PhD Thesis (1990).
Observation:
Type: Box(2)
Num Observation Min Max
0 Car Position -1.2 0.6
1 Car Velocity -0.07 0.07
Actions:
Type: Discrete(3)
Num Action
0 Accelerate to the Left
1 Don't accelerate
2 Accelerate to the Right
Note: This does not affect the amount of velocity affected by the
gravitational pull acting on the car.
Reward:
Reward of 1 is awarded if the agent reached the flag (position = 0.5)
on top of the mountain.
Reward of 0 is awarded if the position of the agent is less than 0.5.
Starting State:
The position of the car is assigned a uniform random value in
[-0.6 , -0.4].
The starting velocity of the car is always assigned to 0.
Episode Termination:
The car position is more than 0.5
"""
name = "MountainCar"
def __init__(self, goal_velocity=0):
# init base classes
Model.__init__(self)
RenderInterface2D.__init__(self)
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.goal_velocity = goal_velocity
self.force = 0.001
self.gravity = 0.0025
self.low = np.array([self.min_position, -self.max_speed])
self.high = np.array([self.max_position, self.max_speed])
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(self.low, self.high)
self.reward_range = (0.0, 1.0)
# rendering info
self.set_clipping_area((-1.2, 0.6, -0.2, 1.1))
self.set_refresh_interval(10) # in milliseconds
# initial reset
self.reset()
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (
action,
type(action),
)
# save state for rendering
if self.is_render_enabled():
self.append_state_for_rendering(np.array(self.state))
next_state, reward, done, info = self.sample(self.state, action)
self.state = next_state.copy()
return next_state, reward, done, info
def reset(self):
self.state = np.array([self.rng.uniform(low=-0.6, high=-0.4), 0])
return self.state.copy()
def sample(self, state, action):
if not isinstance(state, np.ndarray):
state = np.array(state)
assert self.observation_space.contains(
state
), "Invalid state as argument of reset()."
assert self.action_space.contains(action), "%r (%s) invalid" % (
action,
type(action),
)
position = state[0]
velocity = state[1]
velocity += (action - 1) * self.force + math.cos(3 * position) * (-self.gravity)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if position == self.min_position and velocity < 0:
velocity = 0
done = bool(position >= self.goal_position and velocity >= self.goal_velocity)
reward = 0.0
if done:
reward = 1.0
next_state = np.array([position, velocity])
return next_state, reward, done, {}
@staticmethod
def _height(xs):
return np.sin(3 * xs) * 0.45 + 0.55
#
# Below: code for rendering
#
def get_background(self):
bg = Scene()
mountain = GeometricPrimitive("TRIANGLE_FAN")
flag = GeometricPrimitive("TRIANGLES")
mountain.set_color((0.6, 0.3, 0.0))
flag.set_color((0.0, 0.5, 0.0))
# Mountain
mountain.add_vertex((-0.3, -1.0))
mountain.add_vertex((0.6, -1.0))
n_points = 50
obs_range = self.observation_space.high[0] - self.observation_space.low[0]
eps = obs_range / (n_points - 1)
for ii in reversed(range(n_points)):
x = self.observation_space.low[0] + ii * eps
y = self._height(x)
mountain.add_vertex((x, y))
mountain.add_vertex((-1.2, -1.0))
# Flag
goal_x = self.goal_position
goal_y = self._height(goal_x)
flag.add_vertex((goal_x, goal_y))
flag.add_vertex((goal_x + 0.025, goal_y + 0.075))
flag.add_vertex((goal_x - 0.025, goal_y + 0.075))
bg.add_shape(mountain)
bg.add_shape(flag)
return bg
def get_scene(self, state):
scene = Scene()
agent = GeometricPrimitive("QUADS")
agent.set_color((0.0, 0.0, 0.0))
size = 0.025
x = state[0]
y = self._height(x)
agent.add_vertex((x - size, y - size))
agent.add_vertex((x + size, y - size))
agent.add_vertex((x + size, y + size))
agent.add_vertex((x - size, y + size))
scene.add_shape(agent)
return scene
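# --- Hedged usage sketch (assumes rlberry's Discrete space exposes
# sample(), mirroring the gym API) ---
if __name__ == "__main__":
    env = MountainCar()
    state = env.reset()
    for _ in range(100):
        action = env.action_space.sample()
        state, reward, done, _ = env.step(action)
        if done:
            state = env.reset()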
|
nilq/baby-python
|
python
|