from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
import bleach
from flask import current_app, request
from flask_login import UserMixin, AnonymousUserMixin
from . import db, login_manager
class Permission:
FOLLOW = 0x01
COMMENT = 0x02
WRITE_ARTICLES = 0x04
MODERATE_COMMENTS = 0x08
ADMINISTER = 0x80
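# Illustrative sketch (not part of the original source): permissions are bit
# flags, so a set of capabilities is a bitwise OR and a check is a bitwise AND.
# For example, the default 'User' role defined below carries
# FOLLOW | COMMENT | WRITE_ARTICLES == 0x07:
#
#     perms = Permission.FOLLOW | Permission.COMMENT | Permission.WRITE_ARTICLES
#     assert perms & Permission.COMMENT == Permission.COMMENT        # allowed
#     assert perms & Permission.ADMINISTER != Permission.ADMINISTER  # denied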
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64), unique=True)
default = db.Column(db.Boolean, default=False, index=True)
permissions = db.Column(db.Integer)
users = db.relationship('User', backref='role', lazy='dynamic')
@staticmethod
def insert_roles():
roles = {
'User': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES, True),
'Moderator': (Permission.FOLLOW |
Permission.COMMENT |
Permission.WRITE_ARTICLES |
Permission.MODERATE_COMMENTS, False),
'Administrator': (0xff, False)
}
for r in roles:
role = Role.query.filter_by(name=r).first()
if role is None:
role = Role(name=r)
role.permissions = roles[r][0]
role.default = roles[r][1]
db.session.add(role)
db.session.commit()
def __repr__(self):
return '<Role %r>' % self.name
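# Assumed usage from a Flask shell with an application context (a sketch, not
# part of the module): insert_roles() is idempotent, so it can safely be re-run
# after the permission constants change.
#
#     Role.insert_roles()
#     Role.query.filter_by(default=True).first()   # -> <Role 'User'>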
class Follow(db.Model):
__tablename__ = 'follows'
follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
primary_key=True)
followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
primary_key=True)
timestamp = db.Column(db.DateTime, default=datetime.utcnow)
class User(UserMixin, db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(64), unique=True, index=True)
username = db.Column(db.String(64), unique=True, index=True)
role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
password_hash = db.Column(db.String(128))
confirmed = db.Column(db.Boolean, default=False)
name = db.Column(db.String(64))
location = db.Column(db.String(64))
about_me = db.Column(db.Text())
member_since = db.Column(db.DateTime(), default=datetime.utcnow)
last_seen = db.Column(db.DateTime(), default=datetime.utcnow)
avatar_hash = db.Column(db.String(32))
posts = db.relationship('Post', backref='author', lazy='dynamic')
followed = db.relationship('Follow',
foreign_keys=[Follow.follower_id],
backref=db.backref('follower', lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
followers = db.relationship('Follow',
foreign_keys=[Follow.followed_id],
backref=db.backref('followed', lazy='joined'),
lazy='dynamic',
cascade='all, delete-orphan')
comments = db.relationship('Comment', backref='author', lazy='dynamic')
apis = db.relationship('Api', backref='author', lazy='dynamic')
api_hash = db.Column(db.String(128), index=True)
api_gps = db.relationship('Api_gps', backref='author', lazy='dynamic')
user_files = db.relationship('User_files', backref='author', lazy='dynamic')
user_photos = db.relationship('User_photos', backref='author', lazy='dynamic')
sensors = db.relationship('Sensors', backref='author', lazy='dynamic')
#mqtts = db.relationship('mqtt_data', backref='author', lazy='dynamic')
@staticmethod
def generate_fake(count=100):
from sqlalchemy.exc import IntegrityError
from random import seed
import forgery_py
seed()
for i in range(count):
u = User(email=forgery_py.internet.email_address(),
username=forgery_py.internet.user_name(True),
password=forgery_py.lorem_ipsum.word(),
confirmed=True,
name=forgery_py.name.full_name(),
location=forgery_py.address.city(),
about_me=forgery_py.lorem_ipsum.sentence(),
member_since=forgery_py.date.date(True))
db.session.add(u)
try:
db.session.commit()
except IntegrityError:
db.session.rollback()
@staticmethod
def add_self_follows():
for user in User.query.all():
if not user.is_following(user):
user.follow(user)
db.session.add(user)
db.session.commit()
def __init__(self, **kwargs):
super(User, self).__init__(**kwargs)
if self.role is None:
if self.email == current_app.config['FLASKY_ADMIN']:
self.role = Role.query.filter_by(permissions=0xff).first()
if self.role is None:
self.role = Role.query.filter_by(default=True).first()
if self.email is not None and self.avatar_hash is None:
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
self.followed.append(Follow(followed=self))
db.session.add(self)
db.session.commit()
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def generate_confirmation_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'confirm': self.id})
def confirm(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return False
if data.get('confirm') != self.id:
return False
self.confirmed = True
db.session.add(self)
return True
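# Sketch of the confirmation round-trip (assumes an app context and a persisted
# user; not part of the original class):
#
#     token = user.generate_confirmation_token(expiration=3600)
#     user.confirm(token)   # -> True when the signed payload matches user.id
#
# A tampered or expired token makes Serializer.loads() raise, so confirm()
# returns False rather than propagating the error.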
def generate_reset_token(self, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'reset': self.id})
def reset_password(self, token, new_password):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return False
if data.get('reset') != self.id:
return False
self.password = new_password
db.session.add(self)
return True
def generate_email_change_token(self, new_email, expiration=3600):
s = Serializer(current_app.config['SECRET_KEY'], expiration)
return s.dumps({'change_email': self.id, 'new_email': new_email})
def change_email(self, token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
except Exception:
return False
if data.get('change_email') != self.id:
return False
new_email = data.get('new_email')
if new_email is None:
return False
if self.query.filter_by(email=new_email).first() is not None:
return False
self.email = new_email
self.avatar_hash = hashlib.md5(
self.email.encode('utf-8')).hexdigest()
db.session.add(self)
return True
def can(self, permissions):
return self.role is not None and \
(self.role.permissions & permissions) == permissions
def is_administrator(self):
return self.can(Permission.ADMINISTER)
def ping(self):
self.last_seen = datetime.utcnow()
db.session.add(self)
def gravatar(self, size=100, default='identicon', rating='g'):
if request.is_secure:
url = 'https://secure.gravatar.com/avatar'
else:
url = 'http://www.gravatar.com/avatar'
avatar_hash = self.avatar_hash or hashlib.md5(
self.email.encode('utf-8')).hexdigest()
self.api_hash = avatar_hash
db.session.add(self)
return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
url=url, hash=avatar_hash, size=size, default=default, rating=rating)
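# Example of the URL gravatar() returns (the hash shown is a placeholder for
# the MD5 of the user's email):
#   https://secure.gravatar.com/avatar/<md5-of-email>?s=100&d=identicon&r=g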
def follow(self, user):
if not self.is_following(user):
f = Follow(follower=self, followed=user)
db.session.add(f)
def unfollow(self, user):
f = self.followed.filter_by(followed_id=user.id).first()
if f:
db.session.delete(f)
def is_following(self, user):
return self.followed.filter_by(
followed_id=user.id).first() is not None
def is_followed_by(self, user):
return self.followers.filter_by(
follower_id=user.id).first() is not None
@property
def followed_posts(self):
return Post.query.join(Follow, Follow.followed_id == Post.author_id)\
.filter(Follow.follower_id == self.id)
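# The property above is roughly this SQL (sketch):
#   SELECT posts.* FROM posts
#   JOIN follows ON follows.followed_id = posts.author_id
#   WHERE follows.follower_id = :user_id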
def __repr__(self):
return '<User %r>' % self.username
class AnonymousUser(AnonymousUserMixin):
def can(self, permissions):
return False
def is_administrator(self):
return False
login_manager.anonymous_user = AnonymousUser
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class Post(db.Model):
__tablename__ = 'posts'
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
body_html = db.Column(db.Text)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
comments = db.relationship('Comment', backref='post', lazy='dynamic')
@staticmethod
def generate_fake(count=100):
from random import seed, randint
import forgery_py
seed()
user_count = User.query.count()
for i in range(count):
u = User.query.offset(randint(0, user_count - 1)).first()
p = Post(body=forgery_py.lorem_ipsum.sentences(randint(1, 5)),
timestamp=forgery_py.date.date(True),
author=u)
db.session.add(p)
db.session.commit()
@staticmethod
def on_changed_body(target, value, oldvalue, initiator):
allowed_tags = ['a', 'abbr', 'acronym', 'b', 'blockquote', 'code',
'em', 'i', 'li', 'ol', 'pre', 'strong', 'ul',
'h1', 'h2', 'h3', 'p']
target.body_html = bleach.linkify(bleach.clean(
markdown(value, output_format='html'),
tags=allowed_tags, strip=True))
db.event.listen(Post.body, 'set', Post.on_changed_body)
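# Illustrative effect of the 'set' listener above (input/output assumed):
#
#     post.body = "**hello** <script>alert(1)</script>"
#     # post.body_html -> roughly "<p><strong>hello</strong> alert(1)</p>"
#     # markdown renders the text, bleach strips disallowed tags, and
#     # bleach.linkify turns bare URLs into <a> elements.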
class Comment(db.Model):
__tablename__ = 'comments'
id = db.Column(db.Integer, primary_key=True)
body = db.Column(db.Text)
body_html = db.Column(db.Text)
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
disabled = db.Column(db.Boolean)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
post_id = db.Column(db.Integer, db.ForeignKey('posts.id'))
@staticmethod
def on_changed_body(target, value, oldvalue, initiator):
allowed_tags = ['a', 'abbr', 'acronym', 'b', 'code', 'em', 'i',
'strong']
target.body_html = bleach.linkify(bleach.clean(
markdown(value, output_format='html'),
tags=allowed_tags, strip=True))
db.event.listen(Comment.body, 'set', Comment.on_changed_body)
class Api(db.Model):
__tablename__ = 'apis'
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DateTime(), default=datetime.utcnow)
value = db.Column(db.Integer)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
class Api_gps(db.Model):
__tablename__ = 'api_gps'
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DateTime(), default=datetime.utcnow)
lat = db.Column(db.Float)
lng = db.Column(db.Float)
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
class User_files(db.Model):
__tablename__ = 'user_files'
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DateTime(), default=datetime.utcnow)
file_path = db.Column(db.String(128))
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
class User_photos(db.Model):
__tablename__ = 'user_photos'
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DateTime(), default=datetime.utcnow)
file_path = db.Column(db.String(128))
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
class Sensors(db.Model):
__tablename__ = 'sensors'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
timestamp = db.Column(db.DateTime(), default=datetime.utcnow)
about_sensor = db.Column(db.Text())
author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
sensor_data = db.relationship('Sensor_data', backref='sensor', lazy='dynamic')
class Sensor_data(db.Model):
__tablename__ = 'sensor_data'
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DateTime(), default=datetime.utcnow)
value = db.Column(db.Integer)
sensor_id = db.Column(db.Integer, db.ForeignKey('sensors.id'))
class mqtt_data(db.Model):
__tablename__ = 'mqtt_data'
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DateTime(), default=datetime.utcnow)
topic = db.Column(db.String(64))
message = db.Column(db.String(128))
#author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
def __init__(self, topic, message):
self.topic = topic
self.message = message
def __repr__(self):
return '<mqtt_data %r:%r>' % (self.topic, self.message)
class mqtt_gps(db.Model):
__tablename__ = 'mqtt_gps'
id = db.Column(db.Integer, primary_key=True)
timestamp = db.Column(db.DateTime(), default=datetime.utcnow)
topic = db.Column(db.String(64))
message = db.Column(db.String(128))
author_id = db.Column(db.Integer)
gps_id = db.Column(db.Integer)
def __init__(self, topic, message, author_id, gps_id):
self.topic = topic
self.message = message
self.author_id = author_id
self.gps_id = gps_id
def __repr__(self):
return '<mqtt_gps %r:%r,%r,%r>' % (self.topic, self.author_id, self.message, self.gps_id)
# import os
import argparse
import sqlite3
import re
from datetime import datetime
import time
import os.path as op
now = datetime.now()
# e.g. "January 01, 2021 12:00:00" (month name, day, year, time)
d2 = now.strftime("%B %d, %Y %H:%M:%S")
def create_table(conn, sample_names):
c = conn.cursor()
data_columns = ['seqID text', 'source_file text', 'type text', 'start real',
'end real', 'score text', 'strand text', 'phase text', 'UID text', 'Read text', 'Name text', 'Parent text', 'Variant text',
'iso_5p real', 'iso_3p real', 'iso_add3p real', 'iso_add5p real', 'iso_snv real', 'iso_snv_seed real', 'iso_snv_central real', 'iso_snv_central_offset real',
'iso_snv_central_supp real', 'source text', 'cigar text', 'hits real', 'alias text', 'genomic_pos text', 'filter text',
'seed_fam text']
complete_headers = data_columns + sample_names
q = "CREATE TABLE IF NOT EXISTS data_sets(%s)" % ", ".join(complete_headers)
c.execute(q)
conn.commit()
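# For sample_names like ['SRR000001 text', 'SRR000002 text'] the statement
# built above expands to (sketch, column list abbreviated; names assumed):
#   CREATE TABLE IF NOT EXISTS data_sets(seqID text, source_file text, ...,
#                                        seed_fam text, SRR000001 text, SRR000002 text)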
def gff_insert_values(conn, complete_list):
try:
conn.execute('INSERT INTO data_sets VALUES(' + ','.join("?" * len(complete_list)) + ')', complete_list)
conn.commit()
except sqlite3.OperationalError as e:
print()
print("ERROR:")
print("sqlite3.OperationalError: {0}".format(e))
print("Help: Make sure to delete any existing database with tables of different schema")
exit()
# print("date and time =", d2)
def insert_sql(args):
if args.db:
out_file = op.join(args.out, args.db)
conn = sqlite3.connect(out_file)
c = conn.cursor()
else:
out_file = op.join(args.out, "mirtop.db")
conn = sqlite3.connect(out_file)
c = conn.cursor()
with open(args.gff, 'r') as f:
version = source = data_sets = tools = commands_exec = filter_tags = citation = num_records = ""
cnt = 0
for text in f:
# HEADER INFORMATION
if re.search("^## .* VERSION", text): # (R)
version = (text.strip().split(' ')[-1])
elif re.search("^## source-ontology", text): # (R)
source = (text.strip().split(' ')[-1])
elif re.search("^## COLDATA", text): # (R)
data_sets = (
text.strip().split(' ')[-1]) # Might contain more than one data set
sample_names = data_sets.split(',')
sample_names = [w.replace('-', '_') for w in sample_names]
string_text = "text"
output_sample_names = ["{} {}".format(i, string_text) for i in sample_names]
create_table(conn, output_sample_names)
elif re.search("^## TOOLS", text): # (R)
tools = (text.strip().split(' ')[-1])
elif re.search("^## CMD", text): # (O)
commands_exec = (text.strip().split(' ')[-1])
elif re.search("^## FILTER", text): # (O)
filter_tags = (text.strip().split(' ')[-1])
elif re.search("^## REFERENCE", text): # (O)
citation = (text.strip().split(' ')[-1])
# BODY - INFORMATION
elif not re.search("^#", text):
cnt += 1
lines = text.strip().split('\t')
if '=' in lines[-1]:
lines_info_array = lines[-1].replace("=", " ")
else:
lines_info_array = lines[-1]
info = lines_info_array.split('; ')
info_dict = dict()
for elements in info:
(k, v) = elements.split(' ')
info_dict.update([(k, v)])
if 'Variant' in k and ":" in v:
value_list = v.split(',')
for iso_vars in value_list:
if ":" in iso_vars:
(sub_k, sub_v) = iso_vars.split(':')
info_dict.update([(str(sub_k), str(sub_v))])
else:
### Exception for miRge format START
if iso_vars == "iso_snp":
iso_vars = "iso_snv"
elif iso_vars == "iso_add":
iso_vars = "iso_add3p"
### Exception for miRge format END
info_dict['iso_snv'] = "1" if iso_vars == 'iso_snv' else "0"
info_dict['iso_snv_seed'] = "1" if iso_vars == 'iso_snv_seed' else "0"
info_dict['iso_snv_central'] = "1" if iso_vars == 'iso_snv_central' else "0"
info_dict['iso_snv_central_offset'] = "1" if iso_vars == 'iso_snv_central_offset' else "0"
info_dict['iso_snv_central_supp'] = "1" if iso_vars == 'iso_snv_central_supp' else "0"
prefix_list = [lines[0], lines[1], lines[2], lines[3], lines[4], lines[5], lines[6], lines[7],
info_dict.get('UID'), info_dict.get('Read'), info_dict.get('Name'),
info_dict.get('Parent'),
info_dict.get('Variant'),
str(info_dict.setdefault('iso_5p', None)),
str(info_dict.setdefault('iso_3p', None)),
str(info_dict.setdefault('iso_add3p', None)),
str(info_dict.setdefault('iso_add5p', None)),
str(info_dict.setdefault('iso_snv', "0")),
str(info_dict.setdefault('iso_snv_seed', "0")),
str(info_dict.setdefault('iso_snv_central', "0")),
str(info_dict.setdefault('iso_snv_central_offset', "0")),
str(info_dict.setdefault('iso_snv_central_supp', "0")),
source,
info_dict.setdefault('Cigar', None),
info_dict.setdefault('Hits', None), info_dict.setdefault('Alias', None),
info_dict.setdefault('Genomic', None),
info_dict.setdefault('Filter', None), info_dict.setdefault('Seed_fam', None)]
expression_list = info_dict.get('Expression').split(',')
complete_list = prefix_list + expression_list
gff_insert_values(conn, complete_list)
c.execute('''CREATE TABLE IF NOT EXISTS summary(version text, source text, data_sets text, tools text,
commands_exec text, filter_tags text, citation text, records real, date_stamp text)''')
c.execute("INSERT INTO summary(version, source, data_sets, tools, commands_exec, filter_tags, citation, "
"records, date_stamp) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)",
(version, source, data_sets, tools, commands_exec, filter_tags, citation, cnt, d2))
# info_dict.setdefault('Sex', None)
conn.commit()
def query_sql(args):
#print("Function query is being implemented, will be updated soon!!!")
#print(args)
if args.db:
out_file = op.join(args.db)
conn = sqlite3.connect(out_file)
c = conn.cursor()
else:
out_file = op.join("mirtop.db")
conn = sqlite3.connect(out_file)
c = conn.cursor()
#c.execute("SELECT name FROM sqlite_master WHERE type = 'table';")
#record = c.fetchall()
#print(args.db)
if args.expr == "show-tables":
show_tables(conn)
if args.expr == "show-schema":
if args.table:
show_schema(conn, args.table)
else:
print("Error: Require table name")
print("Usage: mirtop sql --query --db <input_database> -e show-schema -t <table_name>")
if args.expr == "show-columns":
show_columns(conn, args)
if args.expr == "describe-gff":
describe_gff_info(conn, args)
if args.expr == "isomirs-per-mirna":
if args.miRNA:
stats_isomiR_per_miRNA(conn, args.miRNA, args)
else:
print("Error: Require miRNA name")
print("Usage: mirtop sql --query --db <input_database> -e isomirs-per-mirna -miR <miRNA>")
if args.expr == "select":
select_query(conn, args)
def show_tables(connection):
print(" +" + 25 * "-" + " +")
print(' | Tables |')
print(" +" + 25 * "-" + " +")
for (tableName,) in connection.execute(
"""
select NAME from SQLITE_MASTER where TYPE='table' order by NAME;
"""
):
tn_name = len(tableName)
req_format_space = 25 - tn_name
print(" | {tn}{format_space}|".format(
tn=tableName,
format_space=req_format_space * " "
)) # Table name (for each table)
print(" +" + 25 * "-" + " +")
def show_schema(connection, table_name):
for (tableName,) in connection.execute(
"""
select NAME from SQLITE_MASTER where TYPE='table' order by NAME;
"""
):
# print("{}:".format(tableName)) # Table name (for each table)
if tableName == table_name:
print(" +" + 57 * "-" + "+")
print(' | Sl | Field | Type | NULL | Key |')
print(" +" + 57 * "-" + "+")
for (
columnID, columnName, columnType,
columnNotNull, columnDefault, columnPK,
) in connection.execute("pragma table_info('{}');".format(tableName)):
name_size = len(columnName)
columnID_size = len(str(columnID))
required_len = 30 - name_size
req_col_size = 2 - columnID_size
print(" | {colSpace}{id} | {name}{space}| {type} | {null} | {pk} |".format(
colSpace=req_col_size * " ",
id=columnID,
name=columnName,
space=required_len * " ",
type=columnType if columnType else "NULL",
null=" not null" if columnNotNull else " NO ",
default=" [{}]".format(columnDefault) if columnDefault else "NULL",
pk=" *{}".format(columnPK) if columnPK else " ",
))
print(" +" + 57 * "-" + "+")
def show_columns(connection, args):
cur = connection.cursor()
query="SELECT * FROM data_sets LIMIT 2"
cur.execute(query)
rows = cur.fetchall()
col_name_list = [col[0] for col in cur.description]
sl_no=1
print("\nSerial\tColumn names")
for col in col_name_list:
print(" "+str(sl_no)+"\t"+str(col))
sl_no+=1
print()
def describe_gff_info(connection, args):
cur = connection.cursor()
query="SELECT * FROM summary"
cur.execute(query)
rows = cur.fetchone()
col_name_list = [col[0] for col in cur.description]
sl_no=1
print("\nSerial\tColumn names\tDescription")
for i, col in enumerate(col_name_list):
desc_g = rows[i]
if desc_g == "":
desc_g = "--"
print(" "+str(sl_no)+"\t"+str(col)+"\t\""+str(desc_g)+"\"")
sl_no+=1
print()
def stats_isomiR_per_miRNA(connection, miRNA_name, args):
cur = connection.cursor()
miR_array = add_mirnas(args)
#cur.execute('SELECT * FROM data_sets WHERE seqID=?', (miRNA_name,))
query="SELECT COUNT(*) FROM data_sets WHERE seqID=? AND type='isomiR' "
query = add_filter(query, args)
print()
stat_counts=0
if args.txtout:
print("The results are being fetched and formated to be written to "+ args.txtout)
#format_results()
#with open(args.txtout, 'w') as w_stat:
w_stat = open(args.txtout, 'w')
w_stat.write("Serial number\tmiRNA\tisomiR Count\n")
else:
print("OUTPUT:")
for miRs in miR_array:
t=(miRs, )
cur.execute(query, t)
#cur.execute("SELECT COUNT(*) FROM data_sets WHERE seqID=? AND type='isomiR'", t)
rows = cur.fetchall()
for row in rows:
stat_counts+=1
row = row[0]
if args.txtout:
w_stat.write(str(stat_counts) +"\t"+miRs+"\t"+str(row)+"\n")
else:
print(str(stat_counts) +". " +"isomiRs for miRNA "+ miRs + ": "+ str(row))
print()
if args.txtout:
w_stat.close()
def WHERE_CLAUSE(query, args):
if "WHERE" in query:
query = query + " AND "
return(query)
else:
query = query + " WHERE "
return(query)
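# How the query builders below compose (illustrative; argument values assumed):
#
#     q = "SELECT * FROM data_sets "
#     q = add_filter(q, args)     # ... WHERE  filter='Pass'
#     q = add_variants(q, args)   # ... AND iso_5p != "None"
#     q = add_limit(q, args)      # ... LIMIT 10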
# ALWAYS EXECUTE THIS LIMIT FUNCTION AT THE END
def add_limit(query, args):
if args.limit:
query = query + " LIMIT "+ args.limit
return query
else:
return query
def add_filter(query, args):
if args.filter:
query = WHERE_CLAUSE(query, args)
query = query + " filter='" + args.filter +"' "
return query
else:
return query
def add_variants(query, args):
my_var_dict = {'iso_5p': 'iso_5p != "None"', 'iso_3p': 'iso_3p != "None"', 'iso_add3p': 'iso_add3p != "None"', 'iso_add5p': 'iso_add5p != "None"',
'iso_snv_seed':'iso_snv_seed != 0', 'iso_snv_central_offset':'iso_snv_central_offset != 0', 'iso_snv_central':'iso_snv_central != 0',
'iso_snv_central_supp':'iso_snv_central_supp != 0', 'iso_snv':'iso_snv != 0'}
user_req_var = args.variant.split(',')
values_req_var =[]
for eachVar in user_req_var:
try:
values_req_var.append(my_var_dict[eachVar])
except KeyError:
print("\nError: \"" + eachVar + "\" does not exist in the choices supported by (-var , --variant)\n")
print("use: mirtop sql -qh for more options")
exit()
#print(values_req_var)
insert_betwn = " AND "
query_suffix = (insert_betwn.join( values_req_var ))
query = WHERE_CLAUSE(query, args)
query = query + query_suffix
return query
#if args.filter:
#query = query + " AND " + query_suffix
#return query
#else:
#query = query + "WHERE " + query_suffix
#return query
def add_mirnas(args):
if args.miRNA.endswith('.txt'):
#print("I am called and I am safe here to read from a file")
with open(args.miRNA, 'r') as miList:
miR_array = miList.read().splitlines()
if args.miRNA_prefix:
element = str(args.miRNA_prefix) + "-"
miR_array = [element + s for s in miR_array]
return(miR_array)
else:
return(miR_array)
else:
miR_array=args.miRNA.split(',')
if args.miRNA_prefix:
element = str(args.miRNA_prefix) + "-"
miR_array = [element + s for s in miR_array]
return(miR_array)
else:
return(miR_array)
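# Example (values assumed): args.miRNA='let-7a-5p,miR-21-5p' with
# args.miRNA_prefix='hsa' yields ['hsa-let-7a-5p', 'hsa-miR-21-5p']; an
# argument ending in '.txt' is read as one miRNA name per line instead.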
def perform_execution(conn, query, args):
cur = conn.cursor()
#print("QUERY: \n"+ query + "\n")
cur.execute(query)
rows = cur.fetchall()
col_name_list = [col[0] for col in cur.description]
if args.miRNA:
return(col_name_list, rows)
else:
format_results(col_name_list, rows, args)
def format_results(header, output, args):
header = '\t'.join(str(col) for col in header)
if args.txtout:
outList = open(args.txtout, 'w')
print("\nWriting data to file: "+ args.txtout + "\n")
outList.write(header+"\n")
write_to_file(output, args, outList)
else:
print(header)
if args.count:
output = list(output[0])
if args.columns:
print("Unique counts for "+ str(args.columns) + " is: " + str(output[0]))
else:
print("Unique counts for all rows is: " + str(output[0]))
else:
for eachrow in output:
row_tab = '\t'.join(str(items) for items in eachrow)
print(row_tab)
def write_to_file(output, args, fileHandler):
if args.miRNA:
fileHandler.write(output + "\n")
elif args.count:
output = list(output[0])
fileHandler.write("Unique counts for "+ args.columns + " is:\t" + str(output))
else:
for eachrow in output:
row_tab = '\t'.join(str(items) for items in eachrow)
fileHandler.write(row_tab+"\n")
def select_query(connection, args):
if args.columns:
if args.count:
if args.count == "T":
query = "SELECT COUNT(" + args.columns + ") FROM data_sets "
else:
print("\nERROR: -n is incorrect!. \nPlease use -n T and optionally specify any one column in -col.\nFor more options see mirtop sql -h")
exit()
else:
query = "SELECT " + args.columns + " FROM data_sets "
elif args.count:
query = "SELECT COUNT(*) FROM data_sets "
else:
query = "SELECT * FROM data_sets "
query = add_filter(query, args)
if args.variant:
query = add_variants(query, args)
if args.miRNA:
miR_array = add_mirnas(args)
query = WHERE_CLAUSE(query, args)
query_series = query + "seqID= "
header_var= ""
j=0
if args.txtout:
outList = open(args.txtout, 'w')
print("\nWriting data to file: "+ args.txtout + "\n")
for miRs in miR_array:
query = query_series + "\"" + miRs + "\" "
query = add_limit(query, args)
(header, rows) = perform_execution(connection, query, args)
j += 1
for i, row in enumerate(rows):
if (i == 0):
if header_var == "":
header_var = '\t'.join(str(col) for col in header)
if args.txtout:
outList.write(header_var+"\n")
else:
print(header_var)
row_tab = '\t'.join(str(items) for items in row)
if args.count:
newOut = str(j) + ". "+ miRs + ":\t" + row_tab
if args.txtout:
write_to_file(newOut, args, outList)
else:
print(newOut)
else:
if args.txtout:
write_to_file(row_tab, args, outList)
else:
print(row_tab)
else:
query = add_limit(query, args)
perform_execution(connection, query, args)
def sql_options(args):
user_options = vars(args)
if args.create:
if args.gff:
insert_sql(args)
else:
print("Usage: mirtop sql --create --gff <input.gff> --db <new_db_name> \(Default: mirtop.db\)")
elif args.query:
if args.expr:
# print("Usage: mirtop sql --query --db <input_database> -e <user_query>")
query_sql(args)
else:
print("Usage: mirtop sql --query --db <input_database> -e <user_query>")
else:
print("Usage: mirtop sql -h")
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo.config import cfg
import webob
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import block_device_mapping
from nova.api.openstack.compute.plugins.v3 import multiple_create
from nova.api.openstack.compute.plugins.v3 import servers
from nova.compute import api as compute_api
from nova.compute import flavors
from nova import db
from nova import exception
from nova.network import manager
from nova.openstack.common import jsonutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import fake_instance
from nova.tests.image import fake
CONF = cfg.CONF
FAKE_UUID = fakes.FAKE_UUID
def fake_gen_uuid():
return FAKE_UUID
def return_security_group(context, instance_id, security_group_id):
pass
class ServersControllerCreateTest(test.TestCase):
def setUp(self):
"""Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
enable_instance_password=True)
self.instance_cache_num = 0
self.instance_cache_by_id = {}
self.instance_cache_by_uuid = {}
ext_info = plugins.LoadedExtensionInfo()
self.controller = servers.ServersController(extension_info=ext_info)
CONF.set_override('extensions_blacklist', 'os-multiple-create',
'osapi_v3')
self.no_mult_create_controller = servers.ServersController(
extension_info=ext_info)
def instance_create(context, inst):
inst_type = flavors.get_flavor_by_flavor_id(3)
image_uuid = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
def_image_ref = 'http://localhost/images/%s' % image_uuid
self.instance_cache_num += 1
instance = fake_instance.fake_db_instance(**{
'id': self.instance_cache_num,
'display_name': inst['display_name'] or 'test',
'uuid': FAKE_UUID,
'instance_type': dict(inst_type),
'access_ip_v4': '1.2.3.4',
'access_ip_v6': 'fead::1234',
'image_ref': inst.get('image_ref', def_image_ref),
'user_id': 'fake',
'project_id': 'fake',
'reservation_id': inst['reservation_id'],
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"progress": 0,
"fixed_ips": [],
"task_state": "",
"vm_state": "",
"security_groups": inst['security_groups'],
})
self.instance_cache_by_id[instance['id']] = instance
self.instance_cache_by_uuid[instance['uuid']] = instance
return instance
def instance_get(context, instance_id):
"""Stub for compute/api create() pulling in instance after
scheduling
"""
return self.instance_cache_by_id[instance_id]
def instance_update(context, uuid, values):
instance = self.instance_cache_by_uuid[uuid]
instance.update(values)
return instance
def server_update(context, instance_uuid, params, update_cells=True,
columns_to_join=None):
inst = self.instance_cache_by_uuid[instance_uuid]
inst.update(params)
return (inst, inst)
def fake_method(*args, **kwargs):
pass
def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
return 'network_topic'
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fake.stub_out_image_service(self.stubs)
fakes.stub_out_nw_api(self.stubs)
self.stubs.Set(uuid, 'uuid4', fake_gen_uuid)
self.stubs.Set(db, 'instance_add_security_group',
return_security_group)
self.stubs.Set(db, 'project_get_networks',
project_get_networks)
self.stubs.Set(db, 'instance_create', instance_create)
self.stubs.Set(db, 'instance_system_metadata_update',
fake_method)
self.stubs.Set(db, 'instance_get', instance_get)
self.stubs.Set(db, 'instance_update', instance_update)
self.stubs.Set(db, 'instance_update_and_get_original',
server_update)
self.stubs.Set(manager.VlanManager, 'allocate_fixed_ip',
fake_method)
def _test_create_extra(self, params, no_image=False,
override_controller=None):
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
server = dict(name='server_test', imageRef=image_uuid, flavorRef=2)
if no_image:
server.pop('imageRef', None)
server.update(params)
body = dict(server=server)
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
if override_controller:
server = override_controller.create(req, body=body).obj['server']
else:
server = self.controller.create(req, body=body).obj['server']
def test_create_instance_with_multiple_create_disabled(self):
min_count = 2
max_count = 3
params = {
multiple_create.MIN_ATTRIBUTE_NAME: min_count,
multiple_create.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertNotIn('min_count', kwargs)
self.assertNotIn('max_count', kwargs)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(
params,
override_controller=self.no_mult_create_controller)
def test_multiple_create_with_string_type_min_and_max(self):
min_count = '2'
max_count = '3'
params = {
multiple_create.MIN_ATTRIBUTE_NAME: min_count,
multiple_create.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertIsInstance(kwargs['min_count'], int)
self.assertIsInstance(kwargs['max_count'], int)
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_with_multiple_create_enabled(self):
min_count = 2
max_count = 3
params = {
multiple_create.MIN_ATTRIBUTE_NAME: min_count,
multiple_create.MAX_ATTRIBUTE_NAME: max_count,
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['max_count'], 3)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
self._test_create_extra(params)
def test_create_instance_invalid_negative_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create,
req,
body=body)
def test_create_instance_invalid_negative_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MAX_ATTRIBUTE_NAME: -1,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create,
req,
body=body)
def test_create_instance_with_blank_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: '',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create,
req,
body=body)
def test_create_instance_with_blank_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MAX_ATTRIBUTE_NAME: '',
'name': 'server_test',
'image_ref': image_href,
'flavor_ref': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create,
req,
body=body)
def test_create_instance_invalid_min_greater_than_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 4,
multiple_create.MAX_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create,
req,
body=body)
def test_create_instance_invalid_alpha_min(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create,
req,
body=body)
def test_create_instance_invalid_alpha_max(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MAX_ATTRIBUTE_NAME: 'abcd',
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create,
req,
body=body)
def test_create_multiple_instances(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, body=body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_len(res["server"])
def test_create_multiple_instances_pass_disabled(self):
"""Test creating multiple instances but not asking for
reservation_id
"""
self.flags(enable_instance_password=False)
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, body=body).obj
self.assertEqual(FAKE_UUID, res["server"]["id"])
self._check_admin_password_missing(res["server"])
def _check_admin_password_len(self, server_dict):
"""utility function - check server_dict for admin_password length."""
self.assertEqual(CONF.password_length,
len(server_dict["adminPass"]))
def _check_admin_password_missing(self, server_dict):
"""utility function - check server_dict for admin_password absence."""
self.assertNotIn("admin_password", server_dict)
def _create_multiple_instances_resv_id_return(self, resv_id_return):
"""Test creating multiple instances with asking for
reservation_id
"""
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 2,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
multiple_create.RRID_ATTRIBUTE_NAME: resv_id_return
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
res = self.controller.create(req, body=body)
reservation_id = res.obj['reservation_id']
self.assertNotEqual(reservation_id, "")
self.assertIsNotNone(reservation_id)
self.assertTrue(len(reservation_id) > 1)
def test_create_multiple_instances_with_resv_id_return(self):
self._create_multiple_instances_resv_id_return(True)
def test_create_multiple_instances_with_string_resv_id_return(self):
self._create_multiple_instances_resv_id_return("True")
def test_create_multiple_instances_with_multiple_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested with a list of block device mappings for volumes.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'},
{'source_type': 'volume', 'uuid': 'vol-yyyy'}
]
params = {
block_device_mapping.ATTRIBUTE_NAME: bdm,
multiple_create.MIN_ATTRIBUTE_NAME: min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(len(kwargs['block_device_mapping']), 2)
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instances_with_single_volume_bdm(self):
"""Test that a BadRequest is raised if multiple instances
are requested to boot from a single volume.
"""
min_count = 2
bdm = [{'source_type': 'volume', 'uuid': 'vol-xxxx'}]
params = {
block_device_mapping.ATTRIBUTE_NAME: bdm,
multiple_create.MIN_ATTRIBUTE_NAME: min_count
}
old_create = compute_api.API.create
def create(*args, **kwargs):
self.assertEqual(kwargs['min_count'], 2)
self.assertEqual(kwargs['block_device_mapping'][0]['volume_id'],
'vol-xxxx')
return old_create(*args, **kwargs)
self.stubs.Set(compute_api.API, 'create', create)
exc = self.assertRaises(webob.exc.HTTPBadRequest,
self._test_create_extra, params, no_image=True)
self.assertEqual("Cannot attach one or more volumes to multiple "
"instances", exc.explanation)
def test_create_multiple_instance_with_non_integer_max_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MAX_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
def test_create_multiple_instance_with_non_integer_min_count(self):
image_href = '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6'
flavor_ref = 'http://localhost/123/flavors/3'
body = {
'server': {
multiple_create.MIN_ATTRIBUTE_NAME: 2.5,
'name': 'server_test',
'imageRef': image_href,
'flavorRef': flavor_ref,
'metadata': {'hello': 'world',
'open': 'stack'},
}
}
req = fakes.HTTPRequestV3.blank('/servers')
req.method = 'POST'
req.body = jsonutils.dumps(body)
req.headers["content-type"] = "application/json"
self.assertRaises(exception.ValidationError,
self.controller.create, req, body=body)
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "ineqpy-"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "ineqpy/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(
commands, args, cwd=None, verbose=False, hide_stderr=False, env=None
):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(
[c] + args,
cwd=cwd,
env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr else None),
)
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for _ in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {
"version": dirname[len(parentdir_prefix) :],
"full-revisionid": None,
"dirty": False,
"error": None,
"date": None,
}
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print(
"Tried directories %s but none started with prefix %s"
% (str(rootdirs), parentdir_prefix)
)
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
with open(versionfile_abs, "r") as f:
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = {r.strip() for r in refnames.strip("()").split(",")}
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = {r[len(TAG) :] for r in refs if r.startswith(TAG)}
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = {r for r in refs if re.search(r"\d", r)}
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix) :]
if verbose:
print("picking %s" % r)
return {
"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": None,
"date": date,
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {
"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False,
"error": "no suitable tags",
"date": None,
}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git.cmd", "git.exe"] if sys.platform == "win32" else ["git"]
out, rc = run_command(
GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True
)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(
GITS,
[
"describe",
"--tags",
"--dirty",
"--always",
"--long",
"--match",
"%s*" % tag_prefix,
],
cwd=root,
)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {"long": full_out, "short": full_out[:7], "error": None}
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[: git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r"^(.+)-(\d+)-g([0-9a-f]+)$", git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = (
"unable to parse git-describe output: '%s'" % describe_out
)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = "tag '%s' doesn't start with prefix '%s'" % (
full_tag,
tag_prefix,
)
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix) :]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(
GITS, ["rev-list", "HEAD", "--count"], cwd=root
)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[
0
].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
return "." if "+" in pieces.get("closest-tag", "") else "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
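# Worked examples for render_pep440 (pieces values assumed):
#   {'closest-tag': '1.2', 'distance': 0, 'dirty': False}  -> "1.2"
#   {'closest-tag': '1.2', 'distance': 3, 'dirty': True,
#    'short': '123abcd'}                                   -> "1.2+3.g123abcd.dirty"
#   {'closest-tag': None, 'distance': 7, 'dirty': False,
#    'short': '123abcd'}                                   -> "0+untagged.7.g123abcd"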
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {
"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None,
}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {
"version": rendered,
"full-revisionid": pieces["long"],
"dirty": pieces["dirty"],
"error": None,
"date": pieces.get("date"),
}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(
get_keywords(), cfg.tag_prefix, verbose
)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split("/"):
root = os.path.dirname(root)
except NameError:
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None,
}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {
"version": "0+unknown",
"full-revisionid": None,
"dirty": None,
"error": "unable to compute version",
"date": None,
}
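# Usage sketch: packages that vendor this file typically expose the version as
#
#     __version__ = get_versions()["version"]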
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""This module contains Google Datastore operators."""
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.datastore import DatastoreHook
from airflow.providers.google.cloud.hooks.gcs import GCSHook
if TYPE_CHECKING:
from airflow.utils.context import Context
class CloudDatastoreExportEntitiesOperator(BaseOperator):
"""
Export entities from Google Cloud Datastore to Cloud Storage
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreExportEntitiesOperator`
.. seealso::
https://cloud.google.com/datastore/docs/export-import-entities
:param bucket: name of the cloud storage bucket to backup data
:type bucket: str
:param namespace: optional namespace path in the specified Cloud Storage bucket
to backup data. If this namespace does not exist in GCS, it will be created.
:type namespace: str
:param datastore_conn_id: the name of the Datastore connection id to use
:type datastore_conn_id: str
:param cloud_storage_conn_id: the name of the Cloud Storage connection id used to
write the backup
:type cloud_storage_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param entity_filter: description of what data from the project is included in the
export, refer to
https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/EntityFilter
:type entity_filter: dict
:param labels: client-assigned labels for cloud storage
:type labels: dict
:param polling_interval_in_seconds: number of seconds to wait before polling for
execution status again
:type polling_interval_in_seconds: int
:param overwrite_existing: if the storage bucket + namespace is not empty, it will be
emptied prior to exports. This enables overwriting existing backups.
:type overwrite_existing: bool
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
'bucket',
'namespace',
'entity_filter',
'labels',
'impersonation_chain',
)
def __init__(
self,
*,
bucket: str,
namespace: Optional[str] = None,
datastore_conn_id: str = 'google_cloud_default',
cloud_storage_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
entity_filter: Optional[dict] = None,
labels: Optional[dict] = None,
polling_interval_in_seconds: int = 10,
overwrite_existing: bool = False,
project_id: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.datastore_conn_id = datastore_conn_id
self.cloud_storage_conn_id = cloud_storage_conn_id
self.delegate_to = delegate_to
self.bucket = bucket
self.namespace = namespace
self.entity_filter = entity_filter
self.labels = labels
self.polling_interval_in_seconds = polling_interval_in_seconds
self.overwrite_existing = overwrite_existing
self.project_id = project_id
self.impersonation_chain = impersonation_chain
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
def execute(self, context: 'Context') -> dict:
self.log.info('Exporting data to Cloud Storage bucket %s', self.bucket)
if self.overwrite_existing and self.namespace:
gcs_hook = GCSHook(self.cloud_storage_conn_id, impersonation_chain=self.impersonation_chain)
objects = gcs_hook.list(self.bucket, prefix=self.namespace)
for obj in objects:
gcs_hook.delete(self.bucket, obj)
ds_hook = DatastoreHook(
self.datastore_conn_id,
self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
result = ds_hook.export_to_storage_bucket(
bucket=self.bucket,
namespace=self.namespace,
entity_filter=self.entity_filter,
labels=self.labels,
project_id=self.project_id,
)
operation_name = result['name']
result = ds_hook.poll_operation_until_done(operation_name, self.polling_interval_in_seconds)
state = result['metadata']['common']['state']
if state != 'SUCCESSFUL':
raise AirflowException(f'Operation failed: result={result}')
return result
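# Usage sketch (not part of the original module; the task id, bucket and
# namespace are hypothetical):
#
#     export_entities = CloudDatastoreExportEntitiesOperator(
#         task_id="export_datastore",
#         bucket="my-datastore-backups",
#         namespace="nightly",
#         overwrite_existing=True,
#     )
#
# The returned operation resource is pushed to XCom (when do_xcom_push is
# enabled), so downstream tasks can read the export metadata from it.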
class CloudDatastoreImportEntitiesOperator(BaseOperator):
"""
Import entities from Cloud Storage to Google Cloud Datastore
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreImportEntitiesOperator`
.. seealso::
https://cloud.google.com/datastore/docs/export-import-entities
:param bucket: container in Cloud Storage to store data
:type bucket: str
:param file: path of the backup metadata file in the specified Cloud Storage bucket.
It should have the extension .overall_export_metadata
:type file: str
:param namespace: optional namespace of the backup metadata file in
the specified Cloud Storage bucket.
:type namespace: str
:param entity_filter: description of what data from the project is included in
the import, refer to
https://cloud.google.com/datastore/docs/reference/rest/Shared.Types/EntityFilter
:type entity_filter: dict
:param labels: client-assigned labels for cloud storage
:type labels: dict
:param datastore_conn_id: the name of the connection id to use
:type datastore_conn_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param polling_interval_in_seconds: number of seconds to wait before polling for
execution status again
:type polling_interval_in_seconds: float
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
'bucket',
'file',
'namespace',
'entity_filter',
'labels',
'impersonation_chain',
)
def __init__(
self,
*,
bucket: str,
file: str,
namespace: Optional[str] = None,
entity_filter: Optional[dict] = None,
labels: Optional[dict] = None,
datastore_conn_id: str = 'google_cloud_default',
delegate_to: Optional[str] = None,
polling_interval_in_seconds: float = 10,
project_id: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.datastore_conn_id = datastore_conn_id
self.delegate_to = delegate_to
self.bucket = bucket
self.file = file
self.namespace = namespace
self.entity_filter = entity_filter
self.labels = labels
self.polling_interval_in_seconds = polling_interval_in_seconds
self.project_id = project_id
self.impersonation_chain = impersonation_chain
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'BaseOperator.do_xcom_push' instead")
def execute(self, context: 'Context'):
self.log.info('Importing data from Cloud Storage bucket %s', self.bucket)
ds_hook = DatastoreHook(
self.datastore_conn_id,
self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
result = ds_hook.import_from_storage_bucket(
bucket=self.bucket,
file=self.file,
namespace=self.namespace,
entity_filter=self.entity_filter,
labels=self.labels,
project_id=self.project_id,
)
operation_name = result['name']
result = ds_hook.poll_operation_until_done(operation_name, self.polling_interval_in_seconds)
state = result['metadata']['common']['state']
if state != 'SUCCESSFUL':
raise AirflowException(f'Operation failed: result={result}')
return result
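# Usage sketch (hypothetical values); `file` must point at the
# .overall_export_metadata file written by a previous export:
#
#     import_entities = CloudDatastoreImportEntitiesOperator(
#         task_id="import_datastore",
#         bucket="my-datastore-backups",
#         file="nightly/2021-01-01.overall_export_metadata",
#         namespace="nightly",
#     )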
class CloudDatastoreAllocateIdsOperator(BaseOperator):
"""
Allocate IDs for incomplete keys. Return list of keys.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreAllocateIdsOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:type partial_keys: list
:param project_id: Google Cloud project ID against which to make the request.
:type project_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"partial_keys",
"impersonation_chain",
)
def __init__(
self,
*,
partial_keys: List,
project_id: Optional[str] = None,
delegate_to: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.partial_keys = partial_keys
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> list:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
keys = hook.allocate_ids(
partial_keys=self.partial_keys,
project_id=self.project_id,
)
return keys
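# Usage sketch; partial keys follow the Datastore REST `Key` shape, with no
# id or name on the final path element (the kind is hypothetical):
#
#     allocate_ids = CloudDatastoreAllocateIdsOperator(
#         task_id="allocate_ids",
#         partial_keys=[{"path": [{"kind": "Task"}]}],
#     )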
class CloudDatastoreBeginTransactionOperator(BaseOperator):
"""
Begins a new transaction. Returns a transaction handle.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreBeginTransactionOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/beginTransaction
:param transaction_options: Options for a new transaction.
:type transaction_options: Dict[str, Any]
:param project_id: Google Cloud project ID against which to make the request.
:type project_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"transaction_options",
"impersonation_chain",
)
def __init__(
self,
*,
transaction_options: Dict[str, Any],
project_id: Optional[str] = None,
delegate_to: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.transaction_options = transaction_options
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> str:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
handle = hook.begin_transaction(
transaction_options=self.transaction_options,
project_id=self.project_id,
)
return handle
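# Usage sketch; `transaction_options` follows the Datastore REST
# `TransactionOptions` shape, e.g. a plain read-write transaction:
#
#     begin_transaction = CloudDatastoreBeginTransactionOperator(
#         task_id="begin_transaction",
#         transaction_options={"readWrite": {}},
#     )
#
# The returned transaction handle lands in XCom for downstream commit or
# rollback tasks.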
class CloudDatastoreCommitOperator(BaseOperator):
"""
Commit a transaction, optionally creating, deleting or modifying some entities.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreCommitOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/commit
:param body: the body of the commit request.
:type body: dict
:param project_id: Google Cloud project ID against which to make the request.
:type project_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"body",
"impersonation_chain",
)
def __init__(
self,
*,
body: Dict[str, Any],
project_id: Optional[str] = None,
delegate_to: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body = body
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> dict:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.commit(
body=self.body,
project_id=self.project_id,
)
return response
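# Usage sketch; the body follows the Datastore REST `commit` request shape,
# with the transaction handle pulled via XCom from a hypothetical upstream
# "begin_transaction" task (the `body` field is templated):
#
#     commit_transaction = CloudDatastoreCommitOperator(
#         task_id="commit_transaction",
#         body={
#             "mode": "TRANSACTIONAL",
#             "mutations": [
#                 {
#                     "insert": {
#                         "key": {"path": [{"kind": "Task"}]},
#                         "properties": {"done": {"booleanValue": False}},
#                     }
#                 }
#             ],
#             "transaction": "{{ task_instance.xcom_pull('begin_transaction') }}",
#         },
#     )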
class CloudDatastoreRollbackOperator(BaseOperator):
"""
Roll back a transaction.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreRollbackOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/rollback
:param transaction: the transaction to roll back.
:type transaction: str
:param project_id: Google Cloud project ID against which to make the request.
:type project_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"transaction",
"impersonation_chain",
)
def __init__(
self,
*,
transaction: str,
project_id: Optional[str] = None,
delegate_to: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.transaction = transaction
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.rollback(
transaction=self.transaction,
project_id=self.project_id,
)
class CloudDatastoreRunQueryOperator(BaseOperator):
"""
Run a query for entities. Returns the batch of query results.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreRunQueryOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/runQuery
:param body: the body of the query request.
:type body: dict
:param project_id: Google Cloud project ID against which to make the request.
:type project_id: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"body",
"impersonation_chain",
)
def __init__(
self,
*,
body: Dict[str, Any],
project_id: Optional[str] = None,
delegate_to: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.body = body
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> dict:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.run_query(
body=self.body,
project_id=self.project_id,
)
return response
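# Usage sketch; the body follows the Datastore REST `runQuery` request shape,
# here using a GQL query (the query string is illustrative):
#
#     run_query = CloudDatastoreRunQueryOperator(
#         task_id="run_query",
#         body={"gqlQuery": {"queryString": "SELECT * FROM Task",
#                            "allowLiterals": True}},
#     )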
class CloudDatastoreGetOperationOperator(BaseOperator):
"""
Gets the latest state of a long-running operation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreGetOperationOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/get
:param name: the name of the operation resource.
:type name: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"name",
"impersonation_chain",
)
def __init__(
self,
*,
name: str,
delegate_to: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context'):
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
op = hook.get_operation(name=self.name)
return op
class CloudDatastoreDeleteOperationOperator(BaseOperator):
"""
Deletes the long-running operation.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDatastoreDeleteOperationOperator`
.. seealso::
https://cloud.google.com/datastore/docs/reference/data/rest/v1/projects.operations/delete
:param name: the name of the operation resource.
:type name: str
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:type gcp_conn_id: str
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
:type impersonation_chain: Union[str, Sequence[str]]
"""
template_fields: Sequence[str] = (
"name",
"impersonation_chain",
)
def __init__(
self,
*,
name: str,
delegate_to: Optional[str] = None,
gcp_conn_id: str = 'google_cloud_default',
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.gcp_conn_id = gcp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
def execute(self, context: 'Context') -> None:
hook = DatastoreHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
hook.delete_operation(name=self.name)
| |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<i@binux.me>
# http://binux.me
# Created on 2014-02-16 23:12:48
import sys
import inspect
import functools
import fractions
import six
from six import add_metaclass, iteritems
from pyspider.libs.log import LogFormatter
from pyspider.libs.url import quote_chinese, _build_url, _encode_params, _encode_multipart_formdata
from pyspider.libs.utils import md5string, hide_me, pretty_unicode
from pyspider.libs.ListIO import ListO
from pyspider.libs.response import rebuild_response
from pyspider.libs.pprint import pprint
class ProcessorResult(object):
"""The result and logs producted by a callback"""
def __init__(self, result, follows, messages, logs, exception, extinfo):
self.result = result
self.follows = follows
self.messages = messages
self.logs = logs
self.exception = exception
self.extinfo = extinfo
def rethrow(self):
"""rethrow the exception"""
if self.exception:
raise self.exception
def logstr(self):
"""handler the log records to formatted string"""
result = []
formatter = LogFormatter(color=False)
for record in self.logs:
if isinstance(record, six.string_types):
result.append(pretty_unicode(record))
else:
if record.exc_info:
a, b, tb = record.exc_info
tb = hide_me(tb, globals())
record.exc_info = a, b, tb
result.append(pretty_unicode(formatter.format(record)))
result.append(u'\n')
return u''.join(result)
def catch_status_code_error(func):
"""
A non-200 response is regarded as a fetch failure and is not passed to the callback.
Use this decorator to override that behaviour.
"""
func._catch_status_code_error = True
return func
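# Usage sketch inside a BaseHandler subclass: a callback decorated like this
# is still invoked for non-200 responses (the callback name is hypothetical):
#
#     @catch_status_code_error
#     def error_page(self, response):
#         return {"url": response.url, "status": response.status_code}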
def not_send_status(func):
"""
Do not send the process status package back to the scheduler.
It is used by callbacks like on_message, on_result, etc.
"""
@functools.wraps(func)
def wrapper(self, response, task):
self._extinfo['not_send_status'] = True
function = func.__get__(self, self.__class__)
return self._run_func(function, response, task)
return wrapper
def config(_config=None, **kwargs):
"""
A decorator for setting the default kwargs of `BaseHandler.crawl`.
Any self.crawl with this callback will use this config.
"""
if _config is None:
_config = {}
_config.update(kwargs)
def wrapper(func):
func._config = _config
return func
return wrapper
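# Usage sketch inside a BaseHandler subclass (values are illustrative):
#
#     @config(age=10 * 24 * 60 * 60)
#     def index_page(self, response):
#         ...
#
# Any self.crawl(..., callback=self.index_page) then defaults to
# age=10 days for the task it creates.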
class NOTSET(object):
pass
def every(minutes=NOTSET, seconds=NOTSET):
"""
The decorated method will be called every `minutes` minutes or `seconds` seconds.
"""
def wrapper(func):
@functools.wraps(func)
def on_cronjob(self, response, task):
if (
response.save
and 'tick' in response.save
and response.save['tick'] % (minutes * 60 + seconds) != 0
):
return None
function = func.__get__(self, self.__class__)
return self._run_func(function, response, task)
on_cronjob.is_cronjob = True
on_cronjob.tick = minutes * 60 + seconds
return on_cronjob
if inspect.isfunction(minutes):
func = minutes
minutes = 1
seconds = 0
return wrapper(func)
if minutes is NOTSET:
if seconds is NOTSET:
minutes = 1
seconds = 0
else:
minutes = 0
if seconds is NOTSET:
seconds = 0
return wrapper
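# Usage sketch inside a BaseHandler subclass: run a callback once a day
# (the URL is illustrative):
#
#     @every(minutes=24 * 60)
#     def on_start(self):
#         self.crawl('http://example.com/', callback=self.index_page)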
class BaseHandlerMeta(type):
def __new__(cls, name, bases, attrs):
cron_jobs = []
min_tick = 0
for each in attrs.values():
if inspect.isfunction(each) and getattr(each, 'is_cronjob', False):
cron_jobs.append(each)
min_tick = fractions.gcd(min_tick, each.tick)
newcls = type.__new__(cls, name, bases, attrs)
newcls._cron_jobs = cron_jobs
newcls._min_tick = min_tick
return newcls
@add_metaclass(BaseHandlerMeta)
class BaseHandler(object):
"""
BaseHandler for all scripts.
`BaseHandler.run` is the main method to handle the task.
"""
crawl_config = {}
project_name = None
_cron_jobs = []
_min_tick = 0
__env__ = {'not_inited': True}
def _reset(self):
"""
reset before each task
"""
self._extinfo = {}
self._messages = []
self._follows = []
self._follows_keys = set()
def _run_func(self, function, *arguments):
"""
Run the callback function with the number of arguments it accepts
"""
args, varargs, keywords, defaults = inspect.getargspec(function)
return function(*arguments[:len(args) - 1])
def _run(self, task, response):
"""
Find the callback specified by `task['callback']`,
raising a status error for it if needed.
"""
self._reset()
if isinstance(response, dict):
response = rebuild_response(response)
process = task.get('process', {})
callback = process.get('callback', '__call__')
if not hasattr(self, callback):
raise NotImplementedError("self.%s() not implemented!" % callback)
function = getattr(self, callback)
if not getattr(function, '_catch_status_code_error', False):
response.raise_for_status()
return self._run_func(function, response, task)
def run(self, module, task, response):
"""
Process the task, catching exceptions and logs, and return a `ProcessorResult` object
"""
logger = module.logger
result = None
exception = None
stdout = sys.stdout
self.task = task
self.response = response
try:
sys.stdout = ListO(module.log_buffer)
if inspect.isgeneratorfunction(self._run):
for result in self._run(task, response):
self._run_func(self.on_result, result, response, task)
else:
result = self._run(task, response)
self._run_func(self.on_result, result, response, task)
except Exception as e:
logger.exception(e)
exception = e
finally:
self.task = None
self.response = None
sys.stdout = stdout
follows = self._follows
messages = self._messages
logs = list(module.log_buffer)
extinfo = self._extinfo
module.log_buffer[:] = []
return ProcessorResult(result, follows, messages, logs, exception, extinfo)
def _crawl(self, url, **kwargs):
"""
The real crawl API:
checks kwargs and repacks them into the schedule/fetch/process sub-dicts.
"""
task = {}
if kwargs.get('callback'):
callback = kwargs['callback']
if isinstance(callback, six.string_types) and hasattr(self, callback):
func = getattr(self, callback)
elif six.callable(callback) and six.get_method_self(callback) is self:
func = callback
kwargs['callback'] = func.__name__
else:
raise NotImplementedError("self.%s() not implemented!" % callback)
if hasattr(func, '_config'):
for k, v in iteritems(func._config):
kwargs.setdefault(k, v)
for k, v in iteritems(self.crawl_config):
kwargs.setdefault(k, v)
url = quote_chinese(_build_url(url.strip(), kwargs.get('params')))
if kwargs.get('files'):
assert isinstance(
kwargs.get('data', {}), dict), "data must be a dict when using with files!"
content_type, data = _encode_multipart_formdata(kwargs.get('data', {}),
kwargs.get('files', {}))
kwargs.setdefault('headers', {})
kwargs['headers']['Content-Type'] = content_type
kwargs['data'] = data
if kwargs.get('data'):
kwargs['data'] = _encode_params(kwargs['data'])
if kwargs.get('data'):
kwargs.setdefault('method', 'POST')
schedule = {}
for key in ('priority', 'retries', 'exetime', 'age', 'itag', 'force_update'):
if key in kwargs and kwargs[key] is not None:
schedule[key] = kwargs[key]
task['schedule'] = schedule
fetch = {}
for key in (
'method',
'headers',
'data',
'timeout',
'allow_redirects',
'cookies',
'proxy',
'etag',
'last_modifed',
'save',
'js_run_at',
'js_script',
'load_images',
'fetch_type'
):
if key in kwargs and kwargs[key] is not None:
fetch[key] = kwargs[key]
task['fetch'] = fetch
process = {}
for key in ('callback', ):
if key in kwargs and kwargs[key] is not None:
process[key] = kwargs[key]
task['process'] = process
task['project'] = self.project_name
task['url'] = url
task['taskid'] = task.get('taskid') or self.get_taskid(task)
cache_key = "%(project)s:%(taskid)s" % task
if cache_key not in self._follows_keys:
self._follows_keys.add(cache_key)
self._follows.append(task)
return task
def get_taskid(self, task):
'''Generate a taskid from task information, md5(url) by default; override me'''
return md5string(task['url'])
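# Override sketch (requires `import json`): include POST data in the taskid
# so that requests to the same URL with different payloads are treated as
# distinct tasks:
#
#     def get_taskid(self, task):
#         return md5string(task['url'] +
#                          json.dumps(task['fetch'].get('data', '')))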
# apis
def crawl(self, url, **kwargs):
'''
available params:
url
callback
method
params
data
files
headers
timeout
allow_redirects
cookies
proxy
etag
last_modifed
fetch_type
js_run_at
js_script
load_images
priority
retries
exetime
age
itag
save
taskid
full documents: http://pyspider.readthedocs.org/en/latest/apis/self.crawl/
'''
if isinstance(url, six.string_types):
return self._crawl(url, **kwargs)
elif hasattr(url, "__iter__"):
result = []
for each in url:
result.append(self._crawl(each, **kwargs))
return result
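# Usage sketch inside a callback (URLs are illustrative):
#
#     def index_page(self, response):
#         self.crawl('http://example.com/page/2', callback=self.index_page)
#         # or fan out over an iterable of URLs:
#         self.crawl(['http://example.com/a', 'http://example.com/b'],
#                    callback=self.detail_page)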
def is_debugger(self):
"""Return true if running in debugger"""
return self.__env__.get('debugger')
def send_message(self, project, msg, url='data:,on_message'):
"""Send messages to other project."""
self._messages.append((project, msg, url))
def on_message(self, project, msg):
"""Receive message from other project, override me."""
pass
def on_result(self, result):
"""Receiving returns from other callback, override me."""
if not result:
return
assert self.task, "on_result can't be used outside a callback."
if self.is_debugger():
pprint(result)
if self.__env__.get('result_queue'):
self.__env__['result_queue'].put((self.task, result))
@not_send_status
def _on_message(self, response):
project, msg = response.save
return self.on_message(project, msg)
@not_send_status
def _on_cronjob(self, response, task):
for cronjob in self._cron_jobs:
function = cronjob.__get__(self, self.__class__)
self._run_func(function, response, task)
@not_send_status
def _on_get_info(self, response, task):
"""Sending runtime infomation about this script."""
result = {}
assert response.save
for each in response.save:
if each == 'min_tick':
result[each] = self._min_tick
self.crawl('data:,on_get_info', save=result)
| |
"""Bio-inspired optimisation algorithms."""
from concurrent import futures
import datetime
import operator
import random
import warnings
import sys
from deap import base, creator, tools
import numpy
import matplotlib.pylab as plt
from external_programs.profit import run_profit
warning_string = """
optimizers.py is now deprecated and will be removed in a future version.
All the optimizers contained in this module are available through the
evo_optimizers.py module, with enhanced functionality and a more consistent
interface. See
https://gist.github.com/ChrisWellsWood/8647d965de2e3c68620daa2dc64de42a for
information on usage.
"""
def buff_eval(params):
"""Builds and evaluates BUFF energy of model in parallelization
Parameters
----------
params: list
Tuple containing the specification to be built, the sequence,
and the parameters for model building.
Returns
-------
model.bude_score: float
BUFF score for model to be assigned to particle fitness value.
"""
specification, sequence, parsed_ind = params
model = specification(*parsed_ind)
model.build()
model.pack_new_sequences(sequence)
return model.buff_interaction_energy.total_energy
def buff_internal_eval(params):
"""Builds and evaluates BUFF internal energy of a model in parallelization
Parameters
----------
params: list
Tuple containing the specification to be built, the sequence
and the parameters for model building.
Returns
-------
model.bude_score: float
BUFF internal energy score to be assigned to particle fitness
value.
"""
specification, sequence, parsed_ind = params
model = specification(*parsed_ind)
model.build()
model.pack_new_sequences(sequence)
return model.buff_internal_energy.total_energy
def rmsd_eval(rmsd_params):
"""Builds a model and runs profit against a reference model.
Parameters
----------
rmsd_params
Returns
-------
rmsd: float
rmsd against reference model as calculated by profit.
"""
specification, sequence, parsed_ind, reference_pdb = rmsd_params
model = specification(*parsed_ind)
model.pack_new_sequences(sequence)
ca, bb, aa = run_profit(model.pdb, reference_pdb, path1=False, path2=False)
return bb
def comparator_eval(comparator_params):
"""Gets BUFF score for interaction between two AMPAL objects
"""
top1, top2, params1, params2, seq1, seq2, movements = comparator_params
xrot, yrot, zrot, xtrans, ytrans, ztrans = movements
obj1 = top1(*params1)
obj2 = top2(*params2)
obj2.rotate(xrot, [1, 0, 0])
obj2.rotate(yrot, [0, 1, 0])
obj2.rotate(zrot, [0, 0, 1])
obj2.translate([xtrans, ytrans, ztrans])
model = obj1 + obj2
model.relabel_all()
model.pack_new_sequences(seq1 + seq2)
return model.buff_interaction_energy.total_energy
class BaseOptimizer:
def __init__(self, **kwargs):
warnings.warn(warning_string, PendingDeprecationWarning)
self._params = {}
self._params.update(**kwargs)
creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
self.toolbox = base.Toolbox()
self.parameter_log = []
def parse_individual(self, individual):
"""Converts a deap individual into a full list of parameters.
Parameters
----------
individual: deap individual from optimization
Details vary according to type of optimization, but
parameters within deap individual are always between -1
and 1. This function converts them into the values used to
actually build the model
Returns
-------
fullpars: list
Full parameter list for model building.
"""
scaled_ind = []
for i in range(len(self._params['value_means'])):
scaled_ind.append(self._params['value_means'][i] + (
individual[i] * self._params['value_ranges'][i]))
fullpars = list(self._params['arrangement'])
for k in range(len(self._params['variable_parameters'])):
for j in range(len(fullpars)):
if fullpars[j] == self._params['variable_parameters'][k]:
fullpars[j] = scaled_ind[k]
return fullpars
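# Worked sketch: with value_means=[60, 20], value_ranges=[20, 10] and
# arrangement=['var0', 0, 0, 'var1', 0, 0], an individual [0.5, -0.5]
# scales to [60 + 0.5 * 20, 20 + (-0.5) * 10] = [70, 15], giving
# fullpars = [70, 0, 0, 15, 0, 0].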
def run_opt(self, popsize, numgen, processors,
plot=False, log=False, **kwargs):
"""
Runs the optimizer.
:param popsize: size of the population to evolve.
:param numgen: number of generations to run the optimisation for.
:param processors: number of processors to use for fitness evaluation.
:param plot: if True, plot the minimisation history after the run.
:param log: if True, write a log file and the best model to disk.
:param kwargs: additional algorithm-specific options, e.g. neighbours.
:return: None
"""
self._params['popsize'] = popsize
self._params['numgen'] = numgen
self._params['processors'] = processors
self._params['plot'] = plot
self._params['log'] = log
# allows us to pass in additional arguments e.g. neighbours
self._params.update(**kwargs)
self.halloffame = tools.HallOfFame(1)
self.stats = tools.Statistics(lambda thing: thing.fitness.values)
self.stats.register("avg", numpy.mean)
self.stats.register("std", numpy.std)
self.stats.register("min", numpy.min)
self.stats.register("max", numpy.max)
self.logbook = tools.Logbook()
self.logbook.header = ["gen", "evals"] + self.stats.fields
self._params['model_count'] = 0
start_time = datetime.datetime.now()
self.initialize_pop()
for g in range(self._params['numgen']):
self.update_pop()
self.halloffame.update(self.population)
self.logbook.record(gen=g, evals=self._params['evals'],
**self.stats.compile(self.population))
print(self.logbook.stream)
end_time = datetime.datetime.now()
time_taken = end_time - start_time
self._params['time_taken'] = time_taken
print("Evaluated {0} models in total".format(
self._params['model_count']))
print("Best fitness is {0}".format(self.halloffame[0].fitness))
print("Best parameters are {0}".format(self.parse_individual(
self.halloffame[0])))
for i, entry in enumerate(self.halloffame[0]):
if entry > 0.95:
print(
"Warning! Parameter {0} is at or near maximum allowed "
"value\n".format(i + 1))
elif entry < -0.95:
print(
"Warning! Parameter {0} is at or near minimum allowed "
"value\n".format(i + 1))
if self._params['log']:
self.log_results()
if self._params['plot']:
print('----Minimisation plot:')
plt.figure(figsize=(5, 5))
plt.plot(range(len(self.logbook.select('min'))),
self.logbook.select('min'))
plt.xlabel('Iteration', fontsize=20)
plt.ylabel('Score', fontsize=20)
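# Usage sketch, assuming a subclass that combines an optimiser mixin with an
# evaluation class (class names, specification and paths are hypothetical):
#
#     class DEScore(OptDE, BaseScore):
#         pass
#
#     opt = DEScore(specification=MySpec, output_path='out/', run_id='run1')
#     opt.parameters(sequence, [60, 20], [20, 10],
#                    ['var0', 0, 0, 'var1', 0, 0])
#     opt.run_opt(popsize=30, numgen=50, processors=4)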
def parameters(self, sequence, value_means, value_ranges, arrangement):
"""Relates the individual to be evolved to the full parameter string.
Parameters
----------
sequence: str
Full amino acid sequence for the specification object to be
optimized. Its length must equal the number of residues in the
model.
value_means: list
List containing mean values for parameters to be optimized.
value_ranges: list
List containing ranges for parameters to be optimized.
Values must be positive.
arrangement: list
Full list of fixed and variable parameters for model
building. Fixed values are the appropriate value. Values
to be varied should be listed as 'var0', 'var1' etc,
and must be in ascending numerical order.
Variables can be repeated if required.
"""
self._params['sequence'] = sequence
self._params['value_means'] = value_means
self._params['value_ranges'] = value_ranges
self._params['arrangement'] = arrangement
if any(x <= 0 for x in self._params['value_ranges']):
raise ValueError("range values must be greater than zero")
self._params['variable_parameters'] = []
for i in range(len(self._params['value_means'])):
self._params['variable_parameters'].append(
"".join(['var', str(i)]))
if len(set(arrangement).intersection(
self._params['variable_parameters'])) != len(
self._params['value_means']):
raise ValueError("argument mismatch!")
if len(self._params['value_ranges']) != len(
self._params['value_means']):
raise ValueError("argument mismatch!")
def assign_fitnesses(self):
raise NotImplementedError("Will depend on evaluation subclass")
# must always operate on whole population- bounds checking etc
# to be done internally
def generate(self):
raise NotImplementedError("Will depend on optimizer type")
def initialize_pop(self):
raise NotImplementedError("Will depend on optimizer type")
def update_pop(self):
raise NotImplementedError("Will depend on optimizer type")
def log_results(self):
"""Saves files for the minimization.
Notes
-----
Currently saves a logfile with best individual and a pdb of
the best model.
"""
best_ind = self.halloffame[0]
model_params = self.parse_individual(
best_ind) # need to change name of 'params'
with open(
'{0}{1}_log.txt'.format(
self._params['output_path'],
self._params['run_id']), 'a+') as log_file:
log_file.write('\nEvaluated {0} models in total\n'.format(
self._params['model_count']))
log_file.write('Run ID is {0}\n'.format(self._params['run_id']))
log_file.write('Best fitness is {0}\n'.format(
self.halloffame[0].fitness))
log_file.write(
'Parameters of best model are {0}\n'.format(model_params))
log_file.write(
'Best individual is {0}\n'.format(self.halloffame[0]))
for i, entry in enumerate(self.halloffame[0]):
if entry > 0.95:
log_file.write(
"Warning! Parameter {0} is at or near maximum allowed "
"value\n".format(i + 1))
elif entry < -0.95:
log_file.write(
"Warning! Parameter {0} is at or near minimum allowed "
"value\n".format(i + 1))
log_file.write('Minimization history: \n{0}'.format(self.logbook))
with open('{0}{1}_bestmodel.pdb'.format(
self._params['output_path'],
self._params['run_id']), 'w') as output_file:
model = self._params['specification'](*model_params)
model.build()
model.pack_new_sequences(self._params['sequence'])
output_file.write(model.pdb)
@property
def best_model(self):
"""Rebuilds the top scoring model from an optimisation.
Returns
-------
model: AMPAL
Returns an AMPAL model of the top scoring parameters.
Raises
------
NameError:
Raises a name error if the optimiser has not been run.
"""
if hasattr(self, 'halloffame'):
model = self._params['specification'](
*self.parse_individual(self.halloffame[0]))
model.pack_new_sequences(self._params['sequence'])
return model
else:
raise NameError('No best model found, have you run the optimiser?')
class BaseScore(BaseOptimizer):
""" Assigns BUFF score as fitness to individuals in optimization."""
evaluation_function = staticmethod(buff_eval)
def assign_fitnesses(self, targets):
self._params['evals'] = len(targets)
px_parameters = zip([self._params['specification']] * len(targets),
[self._params['sequence']] * len(targets),
[self.parse_individual(x) for x in targets])
if (self._params['processors'] == 1) or (sys.platform == 'win32'):
fitnesses = map(self.evaluation_function, px_parameters)
else:
with futures.ProcessPoolExecutor(
max_workers=self._params['processors']) as executor:
fitnesses = executor.map(
self.evaluation_function, px_parameters)
tars_fits = list(zip(targets, fitnesses))
if 'log_params' in self._params:
if self._params['log_params']:
self.parameter_log.append(
[(self.parse_individual(x[0]), x[1]) for x in tars_fits])
for ind, fit in tars_fits:
ind.fitness.values = (fit,)
def make_energy_funnel_data(self, cores=1):
"""Compares models created during the minimisation to the best model.
Returns
-------
energy_rmsd_gen: [(float, float, int)]
A list of triples containing the RMSD to the top model, the BUFF
score and the generation of each model generated during the
minimisation.
"""
if not self.parameter_log:
raise AttributeError(
'No parameter log data to make funnel, have you run the '
'optimiser?')
model_cls = self._params['specification']
gen_tagged = []
for gen, models in enumerate(self.parameter_log):
for model in models:
gen_tagged.append((model[0], model[1], gen))
sorted_pps = sorted(gen_tagged, key=lambda x: x[1])
top_result = sorted_pps[0]
top_result_model = model_cls(*top_result[0])
if (cores == 1) or (sys.platform == 'win32'):
energy_rmsd_gen = map(
self.funnel_rebuild,
[(x, top_result_model,
self._params['specification']) for x in sorted_pps[1:]])
else:
with futures.ProcessPoolExecutor(
max_workers=self._params['processors']) as executor:
energy_rmsd_gen = executor.map(
self.funnel_rebuild,
[(x, top_result_model, self._params['specification'])
for x in sorted_pps[1:]])
return list(energy_rmsd_gen)
@staticmethod
def funnel_rebuild(psg_trm_spec):
"""Rebuilds a model and compares it to a reference model.
Parameters
----------
psg_trm_spec: (([float], float, int), AMPAL, specification)
A tuple containing the parameters, score and generation for a
model as well as a model of the best scoring parameters.
Returns
-------
energy_rmsd_gen: (float, float, int)
A triple containing the RMSD to the top model, the BUFF score
and the generation of a model generated during the minimisation.
"""
param_score_gen, top_result_model, specification = psg_trm_spec
params, score, gen = param_score_gen
model = specification(*params)
rmsd = top_result_model.rmsd(model)
return rmsd, score, gen
class BaseInternalScore(BaseScore):
"""Assigns BUFF score as fitness to individuals in optimization."""
evaluation_function = staticmethod(buff_internal_eval)
class BaseRMSD(BaseOptimizer):
"""Assigns RMSD as fitness to individuals in optimization."""
def assign_fitnesses(self, targets):
self._params['evals'] = len(targets)
px_parameters = zip([self._params['specification']] * len(targets),
[self._params['sequence']] * len(targets),
[self.parse_individual(x) for x in targets],
[self._params['ref_pdb']] * len(targets))
if (self._params['processors'] == 1) or (sys.platform == 'win32'):
fitnesses = map(rmsd_eval, px_parameters)
else:
with futures.ProcessPoolExecutor(
max_workers=self._params['processors']) as executor:
fitnesses = executor.map(rmsd_eval, px_parameters)
for ind, fit in zip(targets, fitnesses):
ind.fitness.values = (fit,)
class BaseComparator(BaseOptimizer):
"""Optimises BUFF score for two rigidbody models.
Notes
-----
Assigns individual fitness to be change in BUFF score on positioning
two predefined models according to parameters from individual.
Allows basic rigid body docking between two AMPAL objects with side
chain repacking in order to estimate interactions.
"""
def assign_fitnesses(self, targets):
self._params['evals'] = len(targets)
px_parameters = zip([self._params['top1']] * len(targets),
[self._params['top2']] * len(targets),
[self._params['params1']] * len(targets),
[self._params['params2']] * len(targets),
[self._params['seq1']] * len(targets),
[self._params['seq2']] * len(targets),
[self.parse_individual(x) for x in targets])
if (self._params['processors'] == 1) or (sys.platform == 'win32'):
fitnesses = map(comparator_eval, px_parameters)
else:
with futures.ProcessPoolExecutor(
max_workers=self._params['processors']) as executor:
fitnesses = executor.map(comparator_eval, px_parameters)
for ind, fit in zip(targets, fitnesses):
ind.fitness.values = (
fit - (self._params['ref1'] + self._params['ref2']),)
def parameters(self, value_means, value_ranges, arrangement):
"""Creates full parameter string for building the specification object.
Notes
-----
Special version for comparator type optimizers that doesn't
require sequence. Should take up to six parameters defining
the x, y, z rotations and x, y, z translations in that order.
For example testing rotation of 60 +/- 20 degrees around the
z axis at a displacement of 20 +/- 10 Angstrom would require:
value_means = [60, 20], value_ranges = [20, 10],
arrangement = ['var0', 0, 0, 'var1', 0, 0]
Parameters
----------
value_means: list
List containing mean values for parameters to be optimized.
value_ranges: list
List containing ranges for parameters to be optimized.
Values must be positive.
arrangement: list
Full list of fixed and variable parameters for model building.
Fixed values are the appropriate value. Values to be varied
should be listed as 'var0', 'var1' etc, and must be in
ascending numerical order. Variables can be repeated if
required.
"""
self._params['value_means'] = value_means
self._params['value_ranges'] = value_ranges
self._params['arrangement'] = arrangement
if any(x <= 0 for x in self._params['value_ranges']):
raise ValueError("range values must be greater than zero")
self._params['variable_parameters'] = []
for i in range(len(self._params['value_means'])):
self._params['variable_parameters'].append(
"".join(['var', str(i)]))
if len(set(arrangement).intersection(
self._params['variable_parameters'])) != len(
self._params['value_means']):
raise ValueError("argument mismatch!")
if len(self._params['value_ranges']) != len(
self._params['value_means']):
raise ValueError("argument mismatch!")
class OptDE:
"""Differential evolution optimisation algorithm.
Notes
-----
Can use the neighbourhood model to reduce the chance of getting stuck
in local optima. This is a very versatile algorithm and its use is
recommended. Note that this class supplies no assign_fitnesses and
relies on state set up in BaseOptimizer, so it is combined with an
evaluation class such as BaseScore via multiple inheritance.
"""
def __init__(self, **kwargs):
super().__init__()
self._params.update(**kwargs)
self._params.setdefault('cxpb', 0.75)
self._params.setdefault('diff_weight', 1)
self._params.setdefault('output_path', None)
self._params.setdefault('neighbours', None)
creator.create("Individual", list, fitness=creator.FitnessMin)
def generate(self):
"""Generates a particle using the creator function.
Notes
-----
Position and speed are uniformly randomly seeded within
allowed bounds. The particle also has speed limit settings
taken from global values.
Returns
-------
particle object
"""
ind = creator.Individual(
[random.uniform(-1, 1)
for _ in range(len(self._params['value_means']))])
ind.ident = None
ind.neighbours = None
return ind
def initialize_pop(self):
"""Assigns indices to individuals in population."""
self.toolbox.register("individual", self.generate)
self.toolbox.register("population", tools.initRepeat,
list, self.toolbox.individual)
self.population = self.toolbox.population(n=self._params['popsize'])
if self._params['neighbours']:
for i in range(len(self.population)):
self.population[i].ident = i
self.population[i].neighbours = list(
set(
[(i - x) % len(self.population)
for x in range(1, self._params['neighbours'] + 1)] +
[(i + x) % len(self.population)
for x in range(1, self._params['neighbours'] + 1)]
))
self.assign_fitnesses(self.population)
def crossover(self, ind):
"""Used by the evolution process to generate a new individual.
Notes
-----
This is a tweaked version of the classical DE crossover
algorithm; the main difference is that candidate parameters are
generated using a lognormal distribution. Bound handling is
achieved by resampling where the candidate solution exceeds +/-1.
Parameters
----------
ind: deap individual
The individual whose neighbourhood (or the whole population)
supplies the three donors used for crossover.
Returns
-------
y: deap individual
An individual representing a candidate solution, to be
assigned a fitness.
"""
if self._params['neighbours']:
a, b, c = random.sample([self.population[i]
for i in ind.neighbours], 3)
else:
a, b, c = random.sample(self.population, 3)
y = self.toolbox.clone(a)
y.ident = ind.ident
y.neighbours = ind.neighbours
del y.fitness.values
# y should now be a copy of ind with the vector elements from a
ident = random.randrange(len(self._params['value_means']))
for i, value in enumerate(y):
if i == ident or random.random() < self._params['cxpb']:
entry = a[i] + random.lognormvariate(-1.2, 0.5) * \
self._params['diff_weight'] * (b[i] - c[i])
tries = 0
while abs(entry) > 1.0:
tries += 1
entry = a[i] + random.lognormvariate(-1.2, 0.5) * \
self._params['diff_weight'] * (b[i] - c[i])
if tries > 10000:
entry = a[i]
y[i] = entry
return y
def update_pop(self):
"""Updates the population according to crossover and fitness criteria.
"""
candidates = []
for ind in self.population:
candidates.append(self.crossover(ind))
self._params['model_count'] += len(candidates)
self.assign_fitnesses(candidates)
for i in range(len(self.population)):
if candidates[i].fitness > self.population[i].fitness:
self.population[i] = candidates[i]
class OptPSO:
"""A particle swarm optimization algorithm.
Notes
-----
This is good for avoiding bias and premature convergence, though
it may struggle to find the ultimate optimum solution. Supports
the neighbourhood model. Bound handling is achieved by allowing
particles to exceed permitted bounds, but not assigning them a
fitness in this case.
"""
def __init__(self, **kwargs):
self.population = None
super().__init__()
self._params.update(**kwargs)
self._params.setdefault('output_path', None)
self._params.setdefault('max_speed', 0.75)
self._params.setdefault('neighbours', None)
creator.create("Particle", list, fitness=creator.FitnessMin,
speed=list, smin=None, smax=None, best=None)
self.toolbox.register("particle", self.generate)
# can this pick up the global fitness?
creator.create("Swarm", list, gbest=None, gbestfit=creator.FitnessMin)
self.toolbox.register("swarm", tools.initRepeat,
creator.Swarm, self.toolbox.particle)
def initialize_pop(self):
"""Generates initial population with random positions and speeds."""
self.population = self.toolbox.swarm(n=self._params['popsize'])
if self._params['neighbours']:
for i in range(len(self.population)):
self.population[i].ident = i
self.population[i].neighbours = list(
set(
[(i - x) % len(self.population)
for x in range(1, self._params['neighbours'] + 1)] +
[i] +
[(i + x) % len(self.population)
for x in range(1, self._params['neighbours'] + 1)]
))
else:
for i in range(len(self.population)):
self.population[i].ident = i
self.population[i].neighbours = [
x for x in range(len(self.population))]
self.assign_fitnesses(self.population)
for part in self.population:
part.best = creator.Particle(part)
part.best.fitness.values = part.fitness.values
# self.pop.gbestfit = max(part.fitness for part in self.pop)
# self.pop.gbest = max(enumerate(self.pop),
# key=lambda x: self.pop[x[0]].fitness)[1]
def generate(self):
"""Generates a particle using the creator function.
Notes
-----
Position and speed are uniformly randomly seeded within
allowed bounds. The particle also has speed limit settings
taken from global values.
Returns
-------
particle object
"""
part = creator.Particle(
[random.uniform(-1, 1)
for _ in range(len(self._params['value_means']))])
part.speed = [
random.uniform(-self._params['max_speed'],
self._params['max_speed'])
for _ in range(len(self._params['value_means']))]
part.smin = -self._params['max_speed']
part.smax = self._params['max_speed']
part.ident = None
part.neighbours = None
return part
def update_particle(self, part, chi=0.729843788, c=2.05):
"""Constriction factor update particle method.
Notes
-----
Looks for a list of neighbours attached to a particle and
uses the particle's best position and that of the best
neighbour.
"""
neighbour_pool = [self.population[i] for i in part.neighbours]
best_neighbour = max(neighbour_pool, key=lambda x: x.best.fitness)
ce1 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce2 = (c * random.uniform(0, 1) for _ in range(len(part)))
ce1_p = map(operator.mul, ce1, map(operator.sub, part.best, part))
ce2_g = map(operator.mul, ce2, map(
operator.sub, best_neighbour.best, part))
chi_list = [chi] * len(part)
chi_list2 = [1 - chi] * len(part)
a = map(operator.sub,
map(operator.mul, chi_list, map(operator.add, ce1_p, ce2_g)),
map(operator.mul, chi_list2, part.speed))
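# Note: speed + a == chi * (speed + ce1_p + ce2_g), i.e. Clerc's
# constriction-factor update
# v <- chi * (v + c*r1*(pbest - x) + c*r2*(nbest - x)).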
part.speed = list(map(operator.add, part.speed, a))
for i, speed in enumerate(part.speed):
if speed < part.smin:
part.speed[i] = part.smin
elif speed > part.smax:
part.speed[i] = part.smax
part[:] = list(map(operator.add, part, part.speed))
def update_pop(self):
"""Assigns fitnesses to particles that are within bounds."""
valid_particles = []
invalid_particles = []
for part in self.population:
if any(x > 1 or x < -1 for x in part):
invalid_particles.append(part)
else:
valid_particles.append(part)
self._params['model_count'] += len(valid_particles)
for part in valid_particles:
self.update_particle(part)
self.assign_fitnesses(valid_particles)
for part in valid_particles:
if part.fitness > part.best.fitness:
part.best = creator.Particle(part)
part.best.fitness = part.fitness
for part in invalid_particles:
self.update_particle(part)
self.population[:] = valid_particles + invalid_particles
self.population.sort(key=lambda x: x.ident) # shouldn't need to sort?
# for part in self.population:
# if part.best.fitness < part.fitness:
# part.best = creator.Particle(part)
# part.best.fitness.values = part.fitness.values
# self.pop.gbestfit = max(part.fitness for part in self.pop)
# this is the current best, not the all time best
# self.pop.gbest = max(enumerate(self.pop),
# key=lambda x: self.pop[x[0]].fitness)[1]
# but these aren't used anyway
class OptGA:
"""A classic genetic algorithm optimization algorithm.
Notes
-----
Arguably the weakest of the algorithms available, but very good
for eliminating unfavourable regions of the search space. Can be
heavily customized in terms of mutation and crossover operators
etc. Bound handling is achieved simply by clamping any out-of-bounds
parameters to the boundary value.
"""
def __init__(self, **kwargs):
super().__init__()
self._params.update(**kwargs)
self._params.setdefault('output_path', None)
self._params.setdefault('cxpb', 0.5)
self._params.setdefault('mutpb', 0.2)
creator.create("Individual", list, fitness=creator.FitnessMin)
self.toolbox.register("mate", tools.cxBlend, alpha=0.2)
self.toolbox.register("mutate", tools.mutGaussian,
mu=0, sigma=0.2, indpb=0.4)
self.toolbox.register("select", tools.selTournament)
def generate(self):
"""Generates individual with random parameters within allowed bounds.
"""
ind = creator.Individual(
[random.uniform(-1, 1)
for _ in range(len(self._params['value_means']))])
return ind
def initialize_pop(self):
"""Assigns initial fitnesses."""
self.toolbox.register("individual", self.generate)
self.toolbox.register("population", tools.initRepeat,
list, self.toolbox.individual)
self.population = self.toolbox.population(n=self._params['popsize'])
self.assign_fitnesses(self.population)
self._params['model_count'] += len(self.population)
def update_pop(self):
offspring = list(map(self.toolbox.clone, self.population))
# offspring.sort(reverse=True, key=lambda x: x.fitness)
for _ in range(self._params['popsize'] // 2):
if random.random() < self._params['cxpb']:
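# tools.selTournament(individuals, k=2, tournsize=6): draw two
# parents, each the winner of a size-6 tournament.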
child1, child2 = self.toolbox.select(self.population, 2, 6)
temp1 = self.toolbox.clone(child1)
temp2 = self.toolbox.clone(child2)
self.toolbox.mate(temp1, temp2)
del temp1.fitness.values
del temp2.fitness.values
offspring.append(temp1)
offspring.append(temp2)
# for child1, child2 in zip(offspring[::2], offspring[1::2]):
# if random.random() < self._params['cxpb']:
# self.toolbox.mate(child1, child2)
# del child1.fitness.values
# del child2.fitness.values
for mutant in offspring:
if random.random() < self._params['mutpb']:
self.toolbox.mutate(mutant)
del mutant.fitness.values
# simple bound checking
for i in range(len(offspring)):
for j in range(len(offspring[i])):
if offspring[i][j] > 1:
offspring[i][j] = 1
if offspring[i][j] < -1:
offspring[i][j] = -1
self._params['model_count'] += len(
[ind for ind in offspring if not ind.fitness.valid])
self.assign_fitnesses(
[ind for ind in offspring if not ind.fitness.valid])
offspring.sort(reverse=True, key=lambda x: x.fitness)
if len(self.halloffame) != 0:
# elitism: if none beat the best so far, it is reinserted
if offspring[0].fitness < self.halloffame[0].fitness:
offspring.insert(0, self.halloffame[0])
self.population[:] = offspring[:self._params['popsize']]
class OptCMAES:
"""Covariance matrix adaptation evolutionary strategy optimizer.
Notes
-----
Basically uses a covariance matrix at each step to identify the
'direction' of the optimal solution in the search space, and
generates new individuals accordingly. Bound handling is achieved
by clamping any out-of-bounds parameters to the boundary value.
Otherwise the implementation follows the original code from the
deap module.
"""
def __init__(self, **kwargs):
super().__init__()
self._params.update(**kwargs)
self._params.setdefault('sigma', 0.3)
self._params.setdefault('weights', 'superlinear')
creator.create("Individual", list, fitness=creator.FitnessMin)
def initialize_pop(self):
"""Generates the initial population and assigns fitnesses."""
self.initialize_cma_es(
sigma=self._params['sigma'], weights=self._params['weights'],
lambda_=self._params['popsize'],
centroid=[0] * len(self._params['value_means']))
self.toolbox.register("individual", self.make_individual)
self.toolbox.register("generate", self.generate,
self.toolbox.individual)
self.toolbox.register("population", tools.initRepeat,
list, self.initial_individual)
self.toolbox.register("update", self.update)
self.population = self.toolbox.population(n=self._params['popsize'])
self.assign_fitnesses(self.population)
self._params['model_count'] += len(self.population)
def initial_individual(self):
"""Generates an individual with random parameters within bounds."""
ind = creator.Individual(
[random.uniform(-1, 1)
for _ in range(len(self._params['value_means']))])
return ind
def update_pop(self):
self.toolbox.generate()
# simple bound checking
for i in range(len(self.population)):
for j in range(len(self.population[i])):
if self.population[i][j] > 1:
self.population[i][j] = 1
if self.population[i][j] < -1:
self.population[i][j] = -1
self.assign_fitnesses(self.population)
self.toolbox.update(self.population)
self._params['model_count'] += len(self.population)
return
def make_individual(self, paramlist):
part = creator.Individual(paramlist)
part.ident = None
return part
def initialize_cma_es(self, **kwargs):
"""A strategy that will keep track of the basic parameters.
Notes
-----
+------------+---------------------------+----------------------------+
| Parameter | Default | Details |
+============+===========================+============================+
| ``lambda_``| ``int(4 + 3 * log(N))`` | Number of children to |
| | | produce at each generation,|
| | | ``N`` is the individual's |
| | | size (integer). |
+------------+---------------------------+----------------------------+
| ``mu`` | ``int(lambda_ / 2)`` | The number of parents to |
| | | keep from the |
| | | lambda children (integer). |
+------------+---------------------------+----------------------------+
| ``cmatrix``| ``identity(N)`` | The initial covariance |
| | | matrix of the distribution |
| | | that will be sampled. |
+------------+---------------------------+----------------------------+
| ``weights``| ``"superlinear"`` | Decrease speed, can be |
| | | ``"superlinear"``, |
| | | ``"linear"`` or |
| | | ``"equal"``. |
+------------+---------------------------+----------------------------+
| ``cs`` | ``(mueff + 2) / | Cumulation constant for |
| | (N + mueff + 3)`` | step-size. |
+------------+---------------------------+----------------------------+
| ``damps`` | ``1 + 2 * max(0, sqrt(( | Damping for step-size. |
| | mueff - 1) / (N + 1)) - 1)| |
| | + cs`` | |
+------------+---------------------------+----------------------------+
| ``ccum`` | ``4 / (N + 4)`` | Cumulation constant for |
| | | covariance matrix. |
+------------+---------------------------+----------------------------+
| ``ccov1`` | ``2 / ((N + 1.3)^2 + | Learning rate for rank-one |
| | mueff)`` | update. |
+------------+---------------------------+----------------------------+
| ``ccovmu`` | ``2 * (mueff - 2 + 1 / | Learning rate for rank-mu |
| | mueff) / ((N + 2)^2 + | update. |
| | mueff)`` | |
+------------+---------------------------+----------------------------+
Parameters
----------
centroid:
An iterable object that indicates where to start the
evolution.
sigma:
The initial standard deviation of the distribution.
parameter:
One or more parameters to pass to the strategy, as
described in the table above, optional.
"""
self.params = kwargs
# Create the centroid as a numpy array, honouring an explicit
# 'centroid' keyword and defaulting to the origin
self.centroid = numpy.array(self.params.get(
"centroid", [0] * len(self._params['value_means'])))
self.dim = len(self.centroid)
self.sigma = self.params.get("sigma", 0.5)
self.pc = numpy.zeros(self.dim)
self.ps = numpy.zeros(self.dim)
self.chiN = numpy.sqrt(self.dim) * (
1 - 1. / (4. * self.dim) + 1. / (21. * self.dim ** 2))
self.C = self.params.get("cmatrix", numpy.identity(self.dim))
self.diagD, self.B = numpy.linalg.eigh(self.C)
indx = numpy.argsort(self.diagD)
self.diagD = self.diagD[indx] ** 0.5
self.B = self.B[:, indx]
self.BD = self.B * self.diagD
self.cond = self.diagD[indx[-1]] / self.diagD[indx[0]]
self.lambda_ = self.params.get(
"lambda_", int(4 + 3 * numpy.log(self.dim)))
self.update_count = 0
self.computeParams(self.params)
return
def generate(self, func):
"""Generate a population of :math:`\lambda` individuals.
Notes
-----
Individuals are of type *ind_init* from the current strategy.
Parameters
----------
ind_init:
A function object that is able to initialize an
individual from a list.
"""
arz = numpy.random.standard_normal((self.lambda_, self.dim))
arz = self.centroid + self.sigma * numpy.dot(arz, self.BD.T)
self.population = list(map(func, arz))
return
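# Sampling above follows x = centroid + sigma * (B * D) . z with
# z ~ N(0, I), i.e. it draws from N(centroid, sigma^2 * C), since the
# eigendecomposition gives C = (B * D) . (B * D)^T.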
def update(self, population):
"""Update the covariance matrix strategy from the *population*.
Parameters
----------
population:
A list of individuals from which to update the
parameters.
"""
population.sort(key=lambda ind: ind.fitness, reverse=True)
old_centroid = self.centroid
self.centroid = numpy.dot(self.weights, population[0:self.mu])
c_diff = self.centroid - old_centroid
# Cumulation : update evolution path
self.ps = (1 - self.cs) * self.ps \
+ numpy.sqrt(self.cs * (2 - self.cs) * self.mueff) / self.sigma \
* numpy.dot(self.B, (1. / self.diagD)
* numpy.dot(self.B.T, c_diff))
hsig = float((numpy.linalg.norm(self.ps) /
numpy.sqrt(1. - (1. - self.cs) **
(2. * (self.update_count + 1.))) / self.chiN
< (1.4 + 2. / (self.dim + 1.))))
self.update_count += 1
self.pc = (1 - self.cc) * self.pc + hsig \
* numpy.sqrt(self.cc * (2 - self.cc) * self.mueff) / self.sigma \
* c_diff
# Update covariance matrix
artmp = population[0:self.mu] - old_centroid
self.C = (1 - self.ccov1 - self.ccovmu + (1 - hsig)
* self.ccov1 * self.cc * (2 - self.cc)) * self.C \
+ self.ccov1 * numpy.outer(self.pc, self.pc) \
+ self.ccovmu * numpy.dot((self.weights * artmp.T), artmp) \
/ self.sigma ** 2
self.sigma *= numpy.exp((numpy.linalg.norm(self.ps) / self.chiN - 1.)
* self.cs / self.damps)
self.diagD, self.B = numpy.linalg.eigh(self.C)
indx = numpy.argsort(self.diagD)
self.cond = self.diagD[indx[-1]] / self.diagD[indx[0]]
self.diagD = self.diagD[indx] ** 0.5
self.B = self.B[:, indx]
self.BD = self.B * self.diagD
def computeParams(self, params):
"""Computes the parameters depending on :math:`\lambda`.
Notes
-----
It needs to be called again if :math:`\lambda` changes during
evolution.
Parameters
----------
params:
A dictionary of the manually set parameters.
"""
self.mu = params.get("mu", int(self.lambda_ / 2))
rweights = params.get("weights", "superlinear")
if rweights == "superlinear":
self.weights = numpy.log(self.mu + 0.5) - \
numpy.log(numpy.arange(1, self.mu + 1))
elif rweights == "linear":
self.weights = self.mu + 0.5 - numpy.arange(1, self.mu + 1)
elif rweights == "equal":
self.weights = numpy.ones(self.mu)
else:
raise RuntimeError("Unknown weights: %s" % rweights)
self.weights /= sum(self.weights)
self.mueff = 1. / sum(self.weights ** 2)
self.cc = params.get("ccum", 4. / (self.dim + 4.))
self.cs = params.get("cs", (self.mueff + 2.) /
(self.dim + self.mueff + 3.))
self.ccov1 = params.get(
"ccov1", 2. / ((self.dim + 1.3) ** 2 + self.mueff))
self.ccovmu = params.get("ccovmu", 2. * (
self.mueff - 2. + 1. / self.mueff) / (
(self.dim + 2.) ** 2 + self.mueff))
self.ccovmu = min(1 - self.ccov1, self.ccovmu)
self.damps = 1. + 2. * \
max(0, numpy.sqrt((self.mueff - 1.) / (self.dim + 1.)) - 1.) + \
self.cs
self.damps = params.get("damps", self.damps)
return
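# Worked example of the defaults: for mu = 3, 'superlinear' gives raw
# weights log(3.5) - log([1, 2, 3]) ~ (1.253, 0.560, 0.154), which
# normalize to ~ (0.637, 0.285, 0.078), so mueff = 1 / sum(w**2) ~ 2.03.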
class DE_Opt(OptDE, BaseScore):
"""Class for DE algorithm optimizing BUFF fitness."""
def __init__(self, specification, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
class DE_Opt_Internal(OptDE, BaseInternalScore):
"""
Class for DE algorithm optimizing BUFF internal enegyfitness
"""
def __init__(self, specification, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
class DE_RMSD(OptDE, BaseRMSD):
"""
Class for DE algorithm optimizing RMSD against target model
"""
def __init__(self, specification, ref_pdb, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
self._params['ref_pdb'] = ref_pdb
class DE_Comparator(OptDE, BaseComparator):
"""Class for DE algorithm optimizing docking of two AMPAL objects."""
def __init__(self, top1, top2, params1, params2, seq1, seq2, **kwargs):
super().__init__(**kwargs)
self._params['top1'] = top1
self._params['top2'] = top2
self._params['params1'] = params1
self._params['params2'] = params2
self._params['seq1'] = seq1
self._params['seq2'] = seq2
obj1 = top1(*params1)
obj1.pack_new_sequences(seq1)
obj2 = top2(*params2)
obj2.pack_new_sequences(seq2)
self._params['ref1'] = obj1.buff_interaction_energy.total_energy
self._params['ref2'] = obj2.buff_interaction_energy.total_energy
class PSO_Opt(OptPSO, BaseScore):
"""Class for PSO algorithm optimizing BUFF fitness."""
def __init__(self, specification, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
class PSO_RMSD(OptPSO, BaseRMSD):
"""Class for PSO algorithm optimizing RMSD against target model."""
def __init__(self, specification, ref_pdb, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
self._params['ref_pdb'] = ref_pdb
class PSO_Comparator(OptPSO, BaseComparator):
"""Class for PSO algorithm optimizing docking of two AMPAL objects."""
def __init__(self, top1, top2, params1, params2, seq1, seq2, **kwargs):
super().__init__(**kwargs)
self._params['top1'] = top1
self._params['top2'] = top2
self._params['params1'] = params1
self._params['params2'] = params2
self._params['seq1'] = seq1
self._params['seq2'] = seq2
obj1 = top1(*params1)
obj1.pack_new_sequences(seq1)
obj2 = top2(*params2)
obj2.pack_new_sequences(seq2)
self._params['ref1'] = obj1.buff_interaction_energy.total_energy
self._params['ref2'] = obj2.buff_interaction_energy.total_energy
class GA_Opt(OptGA, BaseScore):
"""Class for GA algorithm optimizing BUFF fitness."""
def __init__(self, specification, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
class GA_Opt_Internal(OptGA, BaseInternalScore):
"""Class for GA algorithm optimizing BUFF internal energy."""
def __init__(self, specification, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
class GA_RMSD(OptGA, BaseRMSD):
"""Class for GA algorithm optimizing RMSD against target model."""
def __init__(self, specification, ref_pdb, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
self._params['ref_pdb'] = ref_pdb
class GA_Comparator(OptGA, BaseComparator):
"""Class for GA algorithm optimizing docking of two AMPAL objects."""
def __init__(self, top1, top2, params1, params2, seq1, seq2, **kwargs):
super().__init__(**kwargs)
self._params['top1'] = top1
self._params['top2'] = top2
self._params['params1'] = params1
self._params['params2'] = params2
self._params['seq1'] = seq1
self._params['seq2'] = seq2
obj1 = top1(*params1)
obj1.pack_new_sequences(seq1)
obj2 = top2(*params2)
obj2.pack_new_sequences(seq2)
self._params['ref1'] = obj1.buff_interaction_energy.total_energy
self._params['ref2'] = obj2.buff_interaction_energy.total_energy
class CMAES_Opt(OptCMAES, BaseScore):
"""Class for CMAES algorithm optimizing BUFF fitness."""
def __init__(self, specification, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
class CMAES_RMSD(OptCMAES, BaseRMSD):
"""Class for CMAES algorithm optimizing RMSD against target model."""
def __init__(self, specification, ref_pdb, **kwargs):
super().__init__(**kwargs)
self._params['specification'] = specification
self._params['ref_pdb'] = ref_pdb
class CMAES_Comparator(OptCMAES, BaseComparator):
"""Class for CMAES algorithm optimizing docking of two AMPAL objects."""
def __init__(self, top1, top2, params1, params2, seq1, seq2, **kwargs):
super().__init__(**kwargs)
self._params['top1'] = top1
self._params['top2'] = top2
self._params['params1'] = params1
self._params['params2'] = params2
self._params['seq1'] = seq1
self._params['seq2'] = seq2
obj1 = top1(*params1)
obj1.pack_new_sequences(seq1)
obj2 = top2(*params2)
obj2.pack_new_sequences(seq2)
self._params['ref1'] = obj1.buff_interaction_energy.total_energy
self._params['ref2'] = obj2.buff_interaction_energy.total_energy
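# Minimal usage sketch (hypothetical argument values; assumes the Base*
# mixin classes defined elsewhere in this module supply assign_fitnesses,
# halloffame and the 'popsize'/'value_means' entries in _params):
#
# opt = GA_Opt(specification, value_means=[...], value_ranges=[...],
#              popsize=50)
# opt.initialize_pop()
# for _ in range(100):
#     opt.update_pop()
# best = opt.halloffame[0]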
__author__ = 'Andrew R. Thomson, Christopher W. Wood, Gail J. Bartlett'
__status__ = 'Development'
| |
import json
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db.models import Q
from django.http import *
from django.shortcuts import *
from django.template import Context
from django.template.loader import get_template
from alphacabbage.django.helpers import get_pair_or_404
from alphacabbage.django.decorators import require_post
from gamesoup.games.models import *
from gamesoup.expressions.syntax import Expr
###############################################################################
# FLOW
@staff_member_required
def game_flow(request, game_id, format):
import yapgvb, os.path
game = get_object_or_404(Game, pk=game_id)
# subprocess.call(['dot', '-T', 'png'], )
response = HttpResponse(
mimetype='image/svg+xml' if format == 'svg' else 'image/png')
g = yapgvb.Digraph('Flow')
# g.rankdir = 'LR'
nodes = {}
for obj in game.object_set.all():
n = g.add_node(obj.id, label=str(obj).replace('"', '\\"'))
n.width = len(str(obj)) / 10
n.style = 'filled'
if obj.type.visible:
n.shape = 'box'
if obj.type.has_state:
if obj.per_player:
n.fillcolor = '#6699cc'
else:
n.fillcolor = '#99ccff'
nodes[obj.id] = n
for type in Type.objects.filter(bound_to__instance__game=game).distinct():
n = g.add_node('type_%d' % type.id, label=str(type).replace('"', '\\"'))
n.width = len(str(type)) / 10
n.style = 'filled'
n.fillcolor = 'black'
n.fontcolor = 'white'
nodes['type_%d' % type.id] = n
bindings = ObjectParameterBinding.objects.filter(instance__game=game)
for ref in bindings.filter(parameter__type_parameter__is_factory=False):
if ref.parameter.is_built_in: continue
e = g.add_edge(nodes[ref.instance.id], nodes[ref.object_argument.id])
e.label = ref.parameter.name
e.color = 'gray'
e.fontcolor = 'blue'
e.labelfloat = True
e.fontsize = 14
e.len = 3
for factory in bindings.filter(parameter__type_parameter__is_factory=True):
if factory.parameter.is_built_in: continue
e = g.add_edge(nodes[factory.instance.id], nodes['type_%d' % factory.type_argument.id])
e.label = ''  # factory.parameter.type_parameter.of_type.name
e.color = 'gray'
e.fontcolor = 'gray'
e.labelfloat = True
e.fontsize = 14
e.len = 3
# Danglers
for param in ObjectParameter.objects.filter(type_parameter__of_type__instances__game=game).distinct():
if param.is_built_in: continue
if param.binding is None:
obj = param.of_object
n = g.add_node('missing_param_%d_%d' % (obj.id, param.id), label='')
n.color = 'white'
e = g.add_edge(nodes[obj.id], n)
e.color = 'red'
e.fontcolor = 'red'
e.label = param.name
g.layout(yapgvb.engines.dot)
scratch_path = os.path.join(settings.MEDIA_ROOT, 'flow-scratch', 'game-%d.%s' % (game.id, format))
g.render(scratch_path)
with open(scratch_path) as scratch:
response.write(scratch.read())
return response
###############################################################################
# CODE
@staff_member_required
def game_code(request, game_id):
game = get_object_or_404(Game, pk=game_id)
t = get_template('games/game/code.js')
query = Q(instances__game=game) | Q(bound_to__instance__game=game)
types = Type.objects.filter(query).distinct().order_by('name')
c = Context({
'game': game,
'types': types,
'objects': game.object_set.all(),
'stateful_objects': game.object_set.filter(type__has_state=True),
'visible_objects': game.object_set.filter(type__visible=True),
})
response = HttpResponse(mimetype='text/javascript')
response.write(t.render(c))
return response
###############################################################################
# SEARCH
#
# In these methods we use "or [0]" because of the way the type ids are used by
# the javascript client. Eventually, these types are used in a query to a
# django change list view that takes the form "id__in=%s" % ','.join(type_ids)
# If we use an empty list, this results in an invalid query, and django by
# default will show all types, which is not what we want. If we use [0]
# instead, then the query reads "id__in=0", which will not return any types.
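# For example, [] would produce "id__in=" (Django then lists every type),
# whereas [0] produces "id__in=0" (matches nothing), and a real result
# such as [3, 7] passes through unchanged as "id__in=3,7".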
@staff_member_required
@require_post
def search_requires(request):
obj_ids = map(int, filter(bool, request.POST['object_ids'].split(',')))
if obj_ids:
qs = Type.objects.all()
for obj_id in obj_ids:
obj = Object.objects.get(pk=obj_id)
qs = qs.filter(parameters___interfaces__implemented_by=obj.type).distinct()
def d(t):
params = t.parameters.filter(_is_ref=True)
n = params.count()
params2 = params.exclude(_interfaces__implemented_by__instances__id__in=obj_ids).distinct()
return params2.count() <= n - len(obj_ids)
type_ids = [t.id for t in qs if d(t)]
else:
type_ids = []
response = HttpResponse(mimetype='application/json')
response.write(json.dumps(type_ids or [0])) # See comment above
return response
@staff_member_required
def search_required_by_parameter(request, parameter_id):
param = get_object_or_404(ObjectParameter, pk=parameter_id)
type_ids = [t.id for t in param.candidate_types]
response = HttpResponse(mimetype='application/json')
response.write(json.dumps(type_ids or [0])) # See comment above
return response
###############################################################################
# ASSEMBLER
@staff_member_required
def assemble_game(request, game_id):
game = get_object_or_404(Game, pk=game_id)
context = {
'title': 'Assemble %s' % game.name.lower(),
'game': game,
}
return render_to_response('admin/games/game-assemble.html', context)
@staff_member_required
def refresh_assembler(request, game_id):
game = get_object_or_404(Game, pk=game_id)
context = {
'game': game,
}
return render_to_response('admin/games/assembler/refresh.html', context)
###############################################################################
# INSTANTIATING AND CONFIGURING OBJECTS
@staff_member_required
@require_post
def instantiate_type(request, game_id, type_id):
'''
Instantiate a new object and return {objectID, typeName} in a JSON response.
'''
game = get_object_or_404(Game, pk=game_id)
type = get_object_or_404(Type, pk=type_id)
obj = game.object_set.create(type=type)
response = HttpResponse(mimetype='application/json')
response.write(json.dumps({'objectID': obj.id, 'typeName': type.name}))
return response
@staff_member_required
@require_post
def update_object_name(request, game_id, object_id):
game, obj = get_pair_or_404(Game, 'object_set', game_id, object_id)
obj.name = request.POST['name']
obj.save()
return HttpResponse()
@staff_member_required
@require_post
def update_object_position(request, game_id, object_id):
game, obj = get_pair_or_404(Game, 'object_set', game_id, object_id)
obj.x, obj.y = _get_numbers(request.POST, 'position')
obj.save()
return HttpResponse()
@staff_member_required
@require_post
def update_object_size(request, game_id, object_id):
game, obj = get_pair_or_404(Game, 'object_set', game_id, object_id)
obj.width, obj.height = _get_numbers(request.POST, 'size')
obj.save()
return HttpResponse()
@staff_member_required
@require_post
def toggle_object_ownership(request, game_id, object_id):
game, obj = get_pair_or_404(Game, 'object_set', game_id, object_id)
obj.per_player = not obj.per_player
obj.save()
response = HttpResponse(mimetype='application/json')
response.write(json.dumps({'ownership': 'player' if obj.per_player else 'game'}))
return response
@staff_member_required
def object_configure(request, game_id, object_id):
game, obj = get_pair_or_404(Game, 'object_set', game_id, object_id)
params = obj.parameters.all()
def f(attr):
return filter(lambda p: getattr(p, attr), params)
context = {
'title': 'Configure %s' % obj.type.name,
'obj': obj,
'parameters': params,
'built_ins': f('is_built_in'),
'refs': f('is_ref'),
'factories': f('is_factory'),
'nothing_to_configure': obj.parameters.count() == 0,
}
return render_to_response('admin/games/object-configure.html', context)
@staff_member_required
@require_post
def delete_object(request, game_id, object_id):
game, obj = get_pair_or_404(Game, 'object_set', game_id, object_id)
obj.delete()
return HttpResponse()
@staff_member_required
@require_post
def save_parameter_binding(request, game_id, object_id, parameter_id):
game, obj = get_pair_or_404(Game, 'object_set', game_id, object_id)
try:
param = obj.parameters.get(pk=parameter_id)
except ObjectParameter.DoesNotExist:
raise Http404()
value = param.bind(request.POST['value'])
data = {'value': unicode(value)}
response = HttpResponse(mimetype='application/json')
response.write(json.dumps(data))
return response
@staff_member_required
def candidate_refs(request, game_id, object_id, parameter_id):
game, obj = get_pair_or_404(Game, 'object_set', game_id, object_id)
try:
param = obj.parameters.get(pk=parameter_id)
except ObjectParameter.DoesNotExist:
raise Http404()
response = HttpResponse(mimetype='application/json')
response.write(json.dumps(['object-%d' % obj.id for obj in param.candidate_objects]))
return response
###############################################################################
# LOCAL HELPERS
def _get_numbers(post, name):
return tuple(map(int, post[name].split(',')))
| |
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
# Angelos Tzotsos <tzotsos@gmail.com>
#
# Copyright (c) 2010 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import os
from sqlalchemy import create_engine, asc, desc, func, __version__, select
from sqlalchemy.sql import text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import create_session
from pycsw import util
LOGGER = logging.getLogger(__name__)
class Repository(object):
''' Class to interact with underlying repository '''
_engines = {}
@classmethod
def create_engine(clazz, url):
'''
SQLAlchemy engines are thread-safe, simple wrappers around connection pools:
https://groups.google.com/forum/#!topic/sqlalchemy/t8i3RSKZGb0
To reduce startup time we can cache the engine as a class variable on the
repository object and do database initialization once.
Engines are memoized by url.
'''
if url not in clazz._engines:
LOGGER.debug('creating new engine: %s', url)
engine = create_engine('%s' % url, echo=False)
# load SQLite query bindings
# these can be bound directly via events;
# for SQLAlchemy < 0.7 we need to do this on a per-connection basis
if engine.name in ['sqlite', 'sqlite3'] and __version__ >= '0.7':
from sqlalchemy import event
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_rec):
dbapi_connection.create_function(
'query_spatial', 4, util.query_spatial)
dbapi_connection.create_function(
'update_xpath', 3, util.update_xpath)
dbapi_connection.create_function('get_anytext', 1,
util.get_anytext)
dbapi_connection.create_function('get_geometry_area', 1,
util.get_geometry_area)
dbapi_connection.create_function('get_spatial_overlay_rank', 2,
util.get_spatial_overlay_rank)
clazz._engines[url] = engine
return clazz._engines[url]
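# e.g. calling Repository.create_engine('sqlite:////tmp/records.db')
# (hypothetical URL) twice returns the same cached Engine instance.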
def __init__(self, database, context, app_root=None, table='records', repo_filter=None):
''' Initialize repository '''
self.context = context
self.filter = repo_filter
self.fts = False
# Don't use relative paths; this is a hack to get around
# most WSGI restrictions...
if (app_root and database.startswith('sqlite:///') and
not database.startswith('sqlite:////')):
database = database.replace('sqlite:///',
'sqlite:///%s%s' % (app_root, os.sep))
self.engine = Repository.create_engine('%s' % database)
base = declarative_base(bind=self.engine)
LOGGER.debug('binding ORM to existing database')
self.postgis_geometry_column = None
schema, table = util.sniff_table(table)
self.dataset = type('dataset', (base,),
dict(__tablename__=table, __table_args__={'autoload': True,
'schema': schema}))
self.dbtype = self.engine.name
self.session = create_session(self.engine)
temp_dbtype = None
if self.dbtype == 'postgresql':
# check if PostgreSQL is enabled with PostGIS 1.x
try:
self.session.execute(select([func.postgis_version()]))
temp_dbtype = 'postgresql+postgis+wkt'
LOGGER.debug('PostgreSQL+PostGIS1+WKT detected')
except Exception as err:
LOGGER.debug('PostgreSQL+PostGIS1+WKT detection failed')
# check if PostgreSQL is enabled with PostGIS 2.x
try:
self.session.execute('select(postgis_version())')
temp_dbtype = 'postgresql+postgis+wkt'
LOGGER.debug('PostgreSQL+PostGIS2+WKT detected')
except Exception as err:
LOGGER.debug('PostgreSQL+PostGIS2+WKT detection failed')
# check if a native PostGIS geometry column exists
try:
result = self.session.execute("select f_geometry_column from geometry_columns where f_table_name = '%s' and f_geometry_column != 'wkt_geometry' limit 1;" % table)
row = result.fetchone()
self.postgis_geometry_column = str(row['f_geometry_column'])
temp_dbtype = 'postgresql+postgis+native'
LOGGER.debug('PostgreSQL+PostGIS+Native detected')
except Exception as err:
LOGGER.debug('PostgreSQL+PostGIS+Native not picked up: %s', str(err))
# check if a native PostgreSQL FTS GIN index exists
result = self.session.execute("select relname from pg_class where relname='fts_gin_idx'").scalar()
self.fts = bool(result)
LOGGER.debug('PostgreSQL FTS enabled: %r', self.fts)
if temp_dbtype is not None:
LOGGER.debug('%s support detected', temp_dbtype)
self.dbtype = temp_dbtype
if self.dbtype in ['sqlite', 'sqlite3']: # load SQLite query bindings
# <= 0.6 behaviour
if __version__ < '0.7':
self.connection = self.engine.raw_connection()
self.connection.create_function(
'query_spatial', 4, util.query_spatial)
self.connection.create_function(
'update_xpath', 3, util.update_xpath)
self.connection.create_function('get_anytext', 1,
util.get_anytext)
self.connection.create_function('get_geometry_area', 1,
util.get_geometry_area)
self.connection.create_function('get_spatial_overlay_rank', 2,
util.get_spatial_overlay_rank)
LOGGER.debug('setting repository queryables')
# generate core queryables db and obj bindings
self.queryables = {}
for tname in self.context.model['typenames']:
for qname in self.context.model['typenames'][tname]['queryables']:
self.queryables[qname] = {}
for qkey, qvalue in \
self.context.model['typenames'][tname]['queryables'][qname].iteritems():
self.queryables[qname][qkey] = qvalue
# flatten all queryables
# TODO smarter way of doing this
self.queryables['_all'] = {}
for qbl in self.queryables:
self.queryables['_all'].update(self.queryables[qbl])
self.queryables['_all'].update(self.context.md_core_model['mappings'])
def _create_values(self, values):
value_dict = {}
for num, value in enumerate(values):
value_dict['pvalue%d' % num] = value
return value_dict
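# e.g. _create_values(['foo', 'bar']) == {'pvalue0': 'foo', 'pvalue1': 'bar'},
# matching the :pvalue<n> bind parameters expected in constraint['where'].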
def query_ids(self, ids):
''' Query by list of identifiers '''
column = getattr(self.dataset, \
self.context.md_core_model['mappings']['pycsw:Identifier'])
query = self.session.query(self.dataset).filter(column.in_(ids))
return self._get_repo_filter(query).all()
def query_domain(self, domain, typenames, domainquerytype='list',
count=False):
''' Query by property domain values '''
domain_value = getattr(self.dataset, domain)
if domainquerytype == 'range':
LOGGER.debug('Generating property name range values')
query = self.session.query(func.min(domain_value),
func.max(domain_value))
else:
if count:
LOGGER.debug('Generating property name frequency counts')
query = self.session.query(getattr(self.dataset, domain),
func.count(domain_value)).group_by(domain_value)
else:
query = self.session.query(domain_value).distinct()
return self._get_repo_filter(query).all()
def query_insert(self, direction='max'):
''' Query to get latest (default) or earliest update to repository '''
column = getattr(self.dataset, \
self.context.md_core_model['mappings']['pycsw:InsertDate'])
if direction == 'min':
return self._get_repo_filter(self.session.query(func.min(column))).first()[0]
# else default max
return self._get_repo_filter(self.session.query(func.max(column))).first()[0]
def query_source(self, source):
''' Query by source '''
column = getattr(self.dataset, \
self.context.md_core_model['mappings']['pycsw:Source'])
query = self.session.query(self.dataset).filter(column == source)
return self._get_repo_filter(query).all()
def query(self, constraint, sortby=None, typenames=None,
maxrecords=10, startposition=0):
''' Query records from underlying repository '''
# run the raw query and get total
if 'where' in constraint: # GetRecords with constraint
LOGGER.debug('constraint detected')
query = self.session.query(self.dataset).filter(
text(constraint['where'])).params(self._create_values(constraint['values']))
else: # GetRecords sans constraint
LOGGER.debug('No constraint detected')
query = self.session.query(self.dataset)
total = self._get_repo_filter(query).count()
if util.ranking_pass:  # apply spatial ranking
# TODO: check dbtype here so as to extract WKT from native PostGIS geometry
LOGGER.debug('spatial ranking detected')
LOGGER.debug('Target WKT: %s', getattr(self.dataset, self.context.md_core_model['mappings']['pycsw:BoundingBox']))
LOGGER.debug('Query WKT: %s', util.ranking_query_geometry)
query = query.order_by(func.get_spatial_overlay_rank(getattr(self.dataset, self.context.md_core_model['mappings']['pycsw:BoundingBox']), util.ranking_query_geometry).desc())
# reset module-level ranking state to keep this WSGI-safe
util.ranking_pass = False
util.ranking_query_geometry = ''
if sortby is not None: # apply sorting
LOGGER.debug('sorting detected')
# TODO: check dbtype here so as to extract WKT from native PostGIS geometry
sortby_column = getattr(self.dataset, sortby['propertyname'])
if sortby['order'] == 'DESC': # descending sort
if 'spatial' in sortby and sortby['spatial']: # spatial sort
query = query.order_by(func.get_geometry_area(sortby_column).desc())
else: # aspatial sort
query = query.order_by(sortby_column.desc())
else: # ascending sort
if 'spatial' in sortby and sortby['spatial']: # spatial sort
query = query.order_by(func.get_geometry_area(sortby_column))
else: # aspatial sort
query = query.order_by(sortby_column)
# always apply limit and offset
return [str(total), self._get_repo_filter(query).limit(
maxrecords).offset(startposition).all()]
def insert(self, record, source, insert_date):
''' Insert a record into the repository '''
try:
self.session.begin()
self.session.add(record)
self.session.commit()
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
def update(self, record=None, recprops=None, constraint=None):
''' Update a record in the repository based on identifier '''
if record is not None:
identifier = getattr(record,
self.context.md_core_model['mappings']['pycsw:Identifier'])
xml = getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:XML'])
anytext = getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:AnyText'])
if recprops is None and constraint is None: # full update
LOGGER.debug('full update')
update_dict = dict([(getattr(self.dataset, key),
getattr(record, key)) \
for key in record.__dict__.keys() if key != '_sa_instance_state'])
try:
self.session.begin()
self._get_repo_filter(self.session.query(self.dataset)).filter_by(
identifier=identifier).update(update_dict, synchronize_session='fetch')
self.session.commit()
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
else: # update based on record properties
LOGGER.debug('property based update')
try:
rows = rows2 = 0
self.session.begin()
for rpu in recprops:
# update queryable column and XML document via XPath
if 'xpath' not in rpu['rp']:
self.session.rollback()
raise RuntimeError('XPath not found for property %s' % rpu['rp']['name'])
if 'dbcol' not in rpu['rp']:
self.session.rollback()
raise RuntimeError('property not found for XPath %s' % rpu['rp']['name'])
rows += self._get_repo_filter(self.session.query(self.dataset)).filter(
text(constraint['where'])).params(self._create_values(constraint['values'])).update({
getattr(self.dataset,
rpu['rp']['dbcol']): rpu['value'],
'xml': func.update_xpath(str(self.context.namespaces),
getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:XML']),
str(rpu)),
}, synchronize_session='fetch')
# then update anytext tokens
rows2 += self._get_repo_filter(self.session.query(self.dataset)).filter(
text(constraint['where'])).params(self._create_values(constraint['values'])).update({
'anytext': func.get_anytext(getattr(
self.dataset, self.context.md_core_model['mappings']['pycsw:XML']))
}, synchronize_session='fetch')
self.session.commit()
return rows
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
def delete(self, constraint):
''' Delete a record from the repository '''
try:
self.session.begin()
rows = self._get_repo_filter(self.session.query(self.dataset)).filter(
text(constraint['where'])).params(self._create_values(constraint['values']))
parentids = []
for row in rows: # get ids
parentids.append(getattr(row,
self.context.md_core_model['mappings']['pycsw:Identifier']))
rows = rows.delete(synchronize_session='fetch')
if rows > 0:
LOGGER.debug('Deleting all child records')
# delete any child records which had this record as a parent
rows += self._get_repo_filter(self.session.query(self.dataset)).filter(
getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:ParentIdentifier']).in_(parentids)).delete(
synchronize_session='fetch')
self.session.commit()
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
return rows
def _get_repo_filter(self, query):
''' Apply repository wide side filter / mask query '''
if self.filter is not None:
return query.filter(text(self.filter))
return query
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TFGAN project provides a lightweight GAN training/testing framework.
This file contains the core helper functions to create and train a GAN model.
See the README or examples in `tensorflow_models` for details on how to use.
TFGAN training occurs in four steps:
1) Create a model
2) Add a loss
3) Create train ops
4) Run the train ops
The functions in this file are organized around these four steps. Each function
corresponds to one of the steps.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.ops import variables as variables_lib
from tensorflow.contrib.gan.python import losses as tfgan_losses
from tensorflow.contrib.gan.python import namedtuples
from tensorflow.contrib.gan.python.losses.python import losses_impl as tfgan_losses_impl
from tensorflow.contrib.slim.python.slim import learning as slim_learning
from tensorflow.contrib.training.python.training import training
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses
from tensorflow.python.summary import summary
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import sync_replicas_optimizer
from tensorflow.python.training import training_util
__all__ = [
'gan_model',
'infogan_model',
'acgan_model',
'cyclegan_model',
'stargan_model',
'gan_loss',
'cyclegan_loss',
'stargan_loss',
'gan_train_ops',
'gan_train',
'get_sequential_train_hooks',
'get_joint_train_hooks',
'get_sequential_train_steps',
'RunTrainOpsHook',
]
def gan_model(
# Lambdas defining models.
generator_fn,
discriminator_fn,
# Real data and conditioning.
real_data,
generator_inputs,
# Optional scopes.
generator_scope='Generator',
discriminator_scope='Discriminator',
# Options.
check_shapes=True):
"""Returns GAN model outputs and variables.
Args:
generator_fn: A python lambda that takes `generator_inputs` as inputs and
returns the outputs of the GAN generator.
discriminator_fn: A python lambda that takes `real_data`/`generated data`
and `generator_inputs`. Outputs a Tensor in the range [-inf, inf].
real_data: A Tensor representing the real data.
generator_inputs: A Tensor or list of Tensors to the generator. In the
vanilla GAN case, this might be a single noise Tensor. In the conditional
GAN case, this might be the generator's conditioning.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created.
check_shapes: If `True`, check that generator produces Tensors that are the
same shape as real data. Otherwise, skip this check.
Returns:
A GANModel namedtuple.
Raises:
ValueError: If the generator outputs a Tensor that isn't the same shape as
`real_data`.
"""
# Create models
with variable_scope.variable_scope(generator_scope) as gen_scope:
generator_inputs = _convert_tensor_or_l_or_d(generator_inputs)
generated_data = generator_fn(generator_inputs)
with variable_scope.variable_scope(discriminator_scope) as dis_scope:
discriminator_gen_outputs = discriminator_fn(generated_data,
generator_inputs)
with variable_scope.variable_scope(dis_scope, reuse=True):
real_data = _convert_tensor_or_l_or_d(real_data)
discriminator_real_outputs = discriminator_fn(real_data, generator_inputs)
if check_shapes:
if not generated_data.shape.is_compatible_with(real_data.shape):
raise ValueError(
'Generator output shape (%s) must be the same shape as real data '
'(%s).' % (generated_data.shape, real_data.shape))
# Get model-specific variables.
generator_variables = variables_lib.get_trainable_variables(gen_scope)
discriminator_variables = variables_lib.get_trainable_variables(dis_scope)
return namedtuples.GANModel(
generator_inputs, generated_data, generator_variables, gen_scope,
generator_fn, real_data, discriminator_real_outputs,
discriminator_gen_outputs, discriminator_variables, dis_scope,
discriminator_fn)
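# Example (sketch; hypothetical shapes, image batch and model functions):
# noise = random_ops.random_normal([batch_size, 64])
# model = gan_model(generator_fn, discriminator_fn,
#                   real_data=images, generator_inputs=noise)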
def infogan_model(
# Lambdas defining models.
generator_fn,
discriminator_fn,
# Real data and conditioning.
real_data,
unstructured_generator_inputs,
structured_generator_inputs,
# Optional scopes.
generator_scope='Generator',
discriminator_scope='Discriminator'):
"""Returns an InfoGAN model outputs and variables.
See https://arxiv.org/abs/1606.03657 for more details.
Args:
generator_fn: A python lambda that takes a list of Tensors as inputs and
returns the outputs of the GAN generator.
discriminator_fn: A python lambda that takes `real_data`/`generated data`
and `generator_inputs`. Outputs a 2-tuple of (logits, distribution_list).
`logits` are in the range [-inf, inf], and `distribution_list` is a list
of TensorFlow distributions representing the predicted noise distribution
of the i-th structured noise.
real_data: A Tensor representing the real data.
unstructured_generator_inputs: A list of Tensors to the generator.
These tensors represent the unstructured noise or conditioning.
structured_generator_inputs: A list of Tensors to the generator.
These tensors must have high mutual information with the recognizer.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created.
Returns:
An InfoGANModel namedtuple.
Raises:
ValueError: If the generator outputs a Tensor that isn't the same shape as
`real_data`.
ValueError: If the discriminator output is malformed.
"""
# Create models
with variable_scope.variable_scope(generator_scope) as gen_scope:
unstructured_generator_inputs = _convert_tensor_or_l_or_d(
unstructured_generator_inputs)
structured_generator_inputs = _convert_tensor_or_l_or_d(
structured_generator_inputs)
generator_inputs = (
unstructured_generator_inputs + structured_generator_inputs)
generated_data = generator_fn(generator_inputs)
with variable_scope.variable_scope(discriminator_scope) as disc_scope:
dis_gen_outputs, predicted_distributions = discriminator_fn(
generated_data, generator_inputs)
_validate_distributions(predicted_distributions, structured_generator_inputs)
with variable_scope.variable_scope(disc_scope, reuse=True):
real_data = ops.convert_to_tensor(real_data)
dis_real_outputs, _ = discriminator_fn(real_data, generator_inputs)
if not generated_data.get_shape().is_compatible_with(real_data.get_shape()):
raise ValueError(
'Generator output shape (%s) must be the same shape as real data '
'(%s).' % (generated_data.get_shape(), real_data.get_shape()))
# Get model-specific variables.
generator_variables = variables_lib.get_trainable_variables(gen_scope)
discriminator_variables = variables_lib.get_trainable_variables(disc_scope)
return namedtuples.InfoGANModel(
generator_inputs,
generated_data,
generator_variables,
gen_scope,
generator_fn,
real_data,
dis_real_outputs,
dis_gen_outputs,
discriminator_variables,
disc_scope,
lambda x, y: discriminator_fn(x, y)[0], # conform to non-InfoGAN API
structured_generator_inputs,
predicted_distributions,
discriminator_fn)
def acgan_model(
# Lambdas defining models.
generator_fn,
discriminator_fn,
# Real data and conditioning.
real_data,
generator_inputs,
one_hot_labels,
# Optional scopes.
generator_scope='Generator',
discriminator_scope='Discriminator',
# Options.
check_shapes=True):
"""Returns an ACGANModel contains all the pieces needed for ACGAN training.
The `acgan_model` is the same as the `gan_model` with the only difference
being that the discriminator additionally outputs logits to classify the input
(real or generated).
Therefore, an explicit field holding one_hot_labels is necessary, as well as a
discriminator_fn that outputs a 2-tuple holding the logits for real/fake and
classification.
See https://arxiv.org/abs/1610.09585 for more details.
Args:
generator_fn: A python lambda that takes `generator_inputs` as inputs and
returns the outputs of the GAN generator.
discriminator_fn: A python lambda that takes `real_data`/`generated data`
and `generator_inputs`. Outputs a tuple consisting of two Tensors:
(1) real/fake logits in the range [-inf, inf]
(2) classification logits in the range [-inf, inf]
real_data: A Tensor representing the real data.
generator_inputs: A Tensor or list of Tensors to the generator. In the
vanilla GAN case, this might be a single noise Tensor. In the conditional
GAN case, this might be the generator's conditioning.
one_hot_labels: A Tensor holding one-hot-labels for the batch. Needed by
acgan_loss.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created.
check_shapes: If `True`, check that generator produces Tensors that are the
same shape as real data. Otherwise, skip this check.
Returns:
An ACGANModel namedtuple.
Raises:
ValueError: If the generator outputs a Tensor that isn't the same shape as
`real_data`.
TypeError: If the discriminator does not output a tuple consisting of
(discrimination logits, classification logits).
"""
# Create models
with variable_scope.variable_scope(generator_scope) as gen_scope:
generator_inputs = _convert_tensor_or_l_or_d(generator_inputs)
generated_data = generator_fn(generator_inputs)
with variable_scope.variable_scope(discriminator_scope) as dis_scope:
with ops.name_scope(dis_scope.name + '/generated/'):
(discriminator_gen_outputs, discriminator_gen_classification_logits
) = _validate_acgan_discriminator_outputs(
discriminator_fn(generated_data, generator_inputs))
with variable_scope.variable_scope(dis_scope, reuse=True):
with ops.name_scope(dis_scope.name + '/real/'):
real_data = ops.convert_to_tensor(real_data)
(discriminator_real_outputs, discriminator_real_classification_logits
) = _validate_acgan_discriminator_outputs(
discriminator_fn(real_data, generator_inputs))
if check_shapes:
if not generated_data.shape.is_compatible_with(real_data.shape):
raise ValueError(
'Generator output shape (%s) must be the same shape as real data '
'(%s).' % (generated_data.shape, real_data.shape))
# Get model-specific variables.
generator_variables = variables_lib.get_trainable_variables(gen_scope)
discriminator_variables = variables_lib.get_trainable_variables(dis_scope)
return namedtuples.ACGANModel(
generator_inputs, generated_data, generator_variables, gen_scope,
generator_fn, real_data, discriminator_real_outputs,
discriminator_gen_outputs, discriminator_variables, dis_scope,
discriminator_fn, one_hot_labels,
discriminator_real_classification_logits,
discriminator_gen_classification_logits)
def cyclegan_model(
# Lambdas defining models.
generator_fn,
discriminator_fn,
# data X and Y.
data_x,
data_y,
# Optional scopes.
generator_scope='Generator',
discriminator_scope='Discriminator',
model_x2y_scope='ModelX2Y',
model_y2x_scope='ModelY2X',
# Options.
check_shapes=True):
"""Returns a CycleGAN model outputs and variables.
See https://arxiv.org/abs/1703.10593 for more details.
Args:
generator_fn: A python lambda that takes `data_x` or `data_y` as inputs and
returns the outputs of the GAN generator.
discriminator_fn: A python lambda that takes `real_data`/`generated data`
and `generator_inputs`. Outputs a Tensor in the range [-inf, inf].
data_x: A `Tensor` of dataset X. Must be the same shape as `data_y`.
data_y: A `Tensor` of dataset Y. Must be the same shape as `data_x`.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created. Defaults to 'Generator'.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created. Defaults to
'Discriminator'.
model_x2y_scope: Optional variable scope for model x2y variables. Defaults
to 'ModelX2Y'.
model_y2x_scope: Optional variable scope for model y2x variables. Defaults
to 'ModelY2X'.
check_shapes: If `True`, check that generator produces Tensors that are the
same shape as `data_x` (`data_y`). Otherwise, skip this check.
Returns:
A `CycleGANModel` namedtuple.
Raises:
ValueError: If `check_shapes` is True and `data_x` or the generator output
does not have the same shape as `data_y`.
"""
# Create models.
def _define_partial_model(input_data, output_data):
return gan_model(
generator_fn=generator_fn,
discriminator_fn=discriminator_fn,
real_data=output_data,
generator_inputs=input_data,
generator_scope=generator_scope,
discriminator_scope=discriminator_scope,
check_shapes=check_shapes)
with variable_scope.variable_scope(model_x2y_scope):
model_x2y = _define_partial_model(data_x, data_y)
with variable_scope.variable_scope(model_y2x_scope):
model_y2x = _define_partial_model(data_y, data_x)
with variable_scope.variable_scope(model_y2x.generator_scope, reuse=True):
reconstructed_x = model_y2x.generator_fn(model_x2y.generated_data)
with variable_scope.variable_scope(model_x2y.generator_scope, reuse=True):
reconstructed_y = model_x2y.generator_fn(model_y2x.generated_data)
return namedtuples.CycleGANModel(model_x2y, model_y2x, reconstructed_x,
reconstructed_y)
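# Note: the two partial models share the same generator/discriminator
# *functions* but are built under separate variable scopes (ModelX2Y /
# ModelY2X), so the X->Y and Y->X mappings have independent weights.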
def stargan_model(generator_fn,
discriminator_fn,
input_data,
input_data_domain_label,
generator_scope='Generator',
discriminator_scope='Discriminator'):
"""Returns a StarGAN model outputs and variables.
See https://arxiv.org/abs/1711.09020 for more details.
Args:
generator_fn: A python lambda that takes `inputs` and `targets` as inputs
and returns 'generated_data' as the transformed version of `input` based
on the `target`. `input` has shape (n, h, w, c), `targets` has shape (n,
num_domains), and `generated_data` has the same shape as `input`.
discriminator_fn: A python lambda that takes `inputs` and `num_domains` as
inputs and returns a tuple (`source_prediction`, `domain_prediction`).
`source_prediction` represents the source(real/generated) prediction by
the discriminator, and `domain_prediction` represents the domain
prediction/classification by the discriminator. `source_prediction` has
shape (n) and `domain_prediction` has shape (n, num_domains).
input_data: Tensor or a list of tensor of shape (n, h, w, c) representing
the real input images.
input_data_domain_label: Tensor or a list of tensor of shape (batch_size,
num_domains) representing the domain label associated with the real
images.
generator_scope: Optional generator variable scope. Useful if you want to
reuse a subgraph that has already been created.
discriminator_scope: Optional discriminator variable scope. Useful if you
want to reuse a subgraph that has already been created.
Returns:
A StarGANModel namedtuple containing the tensors that are needed to
compute the loss.
Raises:
ValueError: If `input_data_domain_label` is not rank 2 or not fully
defined in every dimension.
"""
# Convert to tensor.
input_data = _convert_tensor_or_l_or_d(input_data)
input_data_domain_label = _convert_tensor_or_l_or_d(input_data_domain_label)
# Convert list of tensor to a single tensor if applicable.
if isinstance(input_data, (list, tuple)):
input_data = array_ops.concat(
[ops.convert_to_tensor(x) for x in input_data], 0)
if isinstance(input_data_domain_label, (list, tuple)):
input_data_domain_label = array_ops.concat(
[ops.convert_to_tensor(x) for x in input_data_domain_label], 0)
# Get batch_size, num_domains from the labels.
input_data_domain_label.shape.assert_has_rank(2)
input_data_domain_label.shape.assert_is_fully_defined()
batch_size, num_domains = input_data_domain_label.shape.as_list()
# Transform input_data to random target domains.
with variable_scope.variable_scope(generator_scope) as generator_scope:
generated_data_domain_target = _generate_stargan_random_domain_target(
batch_size, num_domains)
generated_data = generator_fn(input_data, generated_data_domain_target)
# Transform generated_data back to the original input_data domain.
with variable_scope.variable_scope(generator_scope, reuse=True):
reconstructed_data = generator_fn(generated_data, input_data_domain_label)
# Predict source and domain for the generated_data using the discriminator.
with variable_scope.variable_scope(
discriminator_scope) as discriminator_scope:
disc_gen_data_source_pred, disc_gen_data_domain_pred = discriminator_fn(
generated_data, num_domains)
# Predict source and domain for the input_data using the discriminator.
with variable_scope.variable_scope(discriminator_scope, reuse=True):
disc_input_data_source_pred, disc_input_data_domain_pred = discriminator_fn(
input_data, num_domains)
# Collect trainable variables from the neural networks.
generator_variables = variables_lib.get_trainable_variables(generator_scope)
discriminator_variables = variables_lib.get_trainable_variables(
discriminator_scope)
# Create the StarGANModel namedtuple.
return namedtuples.StarGANModel(
input_data=input_data,
input_data_domain_label=input_data_domain_label,
generated_data=generated_data,
generated_data_domain_target=generated_data_domain_target,
reconstructed_data=reconstructed_data,
discriminator_input_data_source_predication=disc_input_data_source_pred,
discriminator_generated_data_source_predication=disc_gen_data_source_pred,
discriminator_input_data_domain_predication=disc_input_data_domain_pred,
discriminator_generated_data_domain_predication=disc_gen_data_domain_pred,
generator_variables=generator_variables,
generator_scope=generator_scope,
generator_fn=generator_fn,
discriminator_variables=discriminator_variables,
discriminator_scope=discriminator_scope,
discriminator_fn=discriminator_fn)
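# Example (an illustrative sketch, not part of the library): toy lambdas wired
# into `stargan_model`. The tiny architectures below, as well as `images` and
# `labels`, are assumptions chosen only to satisfy the documented shapes.
#
#   import tensorflow as tf
#
#   def generator_fn(inputs, targets):
#     # Tile the (n, num_domains) target over the image, then map back to the
#     # original channel count so the output matches the input shape.
#     n, h, w, c = inputs.shape.as_list()
#     target_map = tf.tile(tf.reshape(targets, [n, 1, 1, -1]), [1, h, w, 1])
#     return tf.layers.conv2d(
#         tf.concat([inputs, target_map], axis=-1), c, 3, padding='same')
#
#   def discriminator_fn(inputs, num_domains):
#     flat = tf.layers.flatten(inputs)
#     source_pred = tf.squeeze(tf.layers.dense(flat, 1), axis=1)  # (n,)
#     domain_pred = tf.layers.dense(flat, num_domains)  # (n, num_domains)
#     return source_pred, domain_pred
#
#   model = stargan_model(generator_fn, discriminator_fn,
#                         input_data=images, input_data_domain_label=labels)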
def _validate_aux_loss_weight(aux_loss_weight, name='aux_loss_weight'):
if isinstance(aux_loss_weight, ops.Tensor):
aux_loss_weight.shape.assert_is_compatible_with([])
with ops.control_dependencies(
[check_ops.assert_greater_equal(aux_loss_weight, 0.0)]):
aux_loss_weight = array_ops.identity(aux_loss_weight)
elif aux_loss_weight is not None and aux_loss_weight < 0:
    raise ValueError('`%s` must be non-negative. Instead, was %s' %
                     (name, aux_loss_weight))
return aux_loss_weight
def _use_aux_loss(aux_loss_weight):
if aux_loss_weight is not None:
if not isinstance(aux_loss_weight, ops.Tensor):
return aux_loss_weight > 0
else:
return True
else:
return False
def _tensor_pool_adjusted_model(model, tensor_pool_fn):
"""Adjusts model using `tensor_pool_fn`.
Args:
model: A GANModel tuple.
tensor_pool_fn: A function that takes (generated_data, generator_inputs),
stores them in an internal pool and returns a previously stored
(generated_data, generator_inputs) with some probability. For example
tfgan.features.tensor_pool.
Returns:
A new GANModel tuple where discriminator outputs are adjusted by taking
pooled generator outputs as inputs. Returns the original model if
`tensor_pool_fn` is None.
Raises:
ValueError: If tensor pool does not support the `model`.
"""
if isinstance(model, namedtuples.GANModel):
pooled_generator_inputs, pooled_generated_data = tensor_pool_fn(
(model.generator_inputs, model.generated_data))
with variable_scope.variable_scope(model.discriminator_scope, reuse=True):
dis_gen_outputs = model.discriminator_fn(pooled_generated_data,
pooled_generator_inputs)
return model._replace(
generator_inputs=pooled_generator_inputs,
generated_data=pooled_generated_data,
discriminator_gen_outputs=dis_gen_outputs)
elif isinstance(model, namedtuples.ACGANModel):
pooled_generator_inputs, pooled_generated_data = tensor_pool_fn(
(model.generator_inputs, model.generated_data))
with variable_scope.variable_scope(model.discriminator_scope, reuse=True):
(pooled_discriminator_gen_outputs,
pooled_discriminator_gen_classification_logits) = model.discriminator_fn(
pooled_generated_data, pooled_generator_inputs)
return model._replace(
generator_inputs=pooled_generator_inputs,
generated_data=pooled_generated_data,
discriminator_gen_outputs=pooled_discriminator_gen_outputs,
discriminator_gen_classification_logits=
pooled_discriminator_gen_classification_logits)
elif isinstance(model, namedtuples.InfoGANModel):
pooled_generator_inputs, pooled_generated_data, pooled_structured_input = (
tensor_pool_fn((model.generator_inputs, model.generated_data,
model.structured_generator_inputs)))
with variable_scope.variable_scope(model.discriminator_scope, reuse=True):
(pooled_discriminator_gen_outputs,
pooled_predicted_distributions) = model.discriminator_and_aux_fn(
pooled_generated_data, pooled_generator_inputs)
return model._replace(
generator_inputs=pooled_generator_inputs,
generated_data=pooled_generated_data,
structured_generator_inputs=pooled_structured_input,
discriminator_gen_outputs=pooled_discriminator_gen_outputs,
predicted_distributions=pooled_predicted_distributions)
else:
raise ValueError('Tensor pool does not support `model`: %s.' % type(model))
def gan_loss(
# GANModel.
model,
# Loss functions.
generator_loss_fn=tfgan_losses.wasserstein_generator_loss,
discriminator_loss_fn=tfgan_losses.wasserstein_discriminator_loss,
# Auxiliary losses.
gradient_penalty_weight=None,
gradient_penalty_epsilon=1e-10,
gradient_penalty_target=1.0,
gradient_penalty_one_sided=False,
mutual_information_penalty_weight=None,
aux_cond_generator_weight=None,
aux_cond_discriminator_weight=None,
tensor_pool_fn=None,
# Options.
add_summaries=True):
"""Returns losses necessary to train generator and discriminator.
Args:
model: A GANModel tuple.
generator_loss_fn: The loss function on the generator. Takes a GANModel
tuple.
discriminator_loss_fn: The loss function on the discriminator. Takes a
GANModel tuple.
gradient_penalty_weight: If not `None`, must be a non-negative Python number
or Tensor indicating how much to weight the gradient penalty. See
https://arxiv.org/pdf/1704.00028.pdf for more details.
gradient_penalty_epsilon: If `gradient_penalty_weight` is not None, the
small positive value used by the gradient penalty function for numerical
stability. Note some applications will need to increase this value to
avoid NaNs.
gradient_penalty_target: If `gradient_penalty_weight` is not None, a Python
number or `Tensor` indicating the target value of gradient norm. See the
CIFAR10 section of https://arxiv.org/abs/1710.10196. Defaults to 1.0.
gradient_penalty_one_sided: If `True`, penalty proposed in
https://arxiv.org/abs/1709.08894 is used. Defaults to `False`.
mutual_information_penalty_weight: If not `None`, must be a non-negative
Python number or Tensor indicating how much to weight the mutual
information penalty. See https://arxiv.org/abs/1606.03657 for more
details.
    aux_cond_generator_weight: If not None, add a classification loss as in
      https://arxiv.org/abs/1610.09585.
    aux_cond_discriminator_weight: If not None, add a classification loss as
      in https://arxiv.org/abs/1610.09585.
tensor_pool_fn: A function that takes (generated_data, generator_inputs),
      stores them in an internal pool and returns previously stored
      (generated_data, generator_inputs). For example
      `tfgan.features.tensor_pool`. Defaults to None (not using tensor pool).
add_summaries: Whether or not to add summaries for the losses.
Returns:
A GANLoss 2-tuple of (generator_loss, discriminator_loss). Includes
regularization losses.
Raises:
ValueError: If any of the auxiliary loss weights is provided and negative.
ValueError: If `mutual_information_penalty_weight` is provided, but the
`model` isn't an `InfoGANModel`.
"""
# Validate arguments.
gradient_penalty_weight = _validate_aux_loss_weight(
gradient_penalty_weight, 'gradient_penalty_weight')
mutual_information_penalty_weight = _validate_aux_loss_weight(
      mutual_information_penalty_weight, 'mutual_information_penalty_weight')
aux_cond_generator_weight = _validate_aux_loss_weight(
aux_cond_generator_weight, 'aux_cond_generator_weight')
aux_cond_discriminator_weight = _validate_aux_loss_weight(
aux_cond_discriminator_weight, 'aux_cond_discriminator_weight')
# Verify configuration for mutual information penalty
if (_use_aux_loss(mutual_information_penalty_weight) and
not isinstance(model, namedtuples.InfoGANModel)):
raise ValueError(
'When `mutual_information_penalty_weight` is provided, `model` must be '
'an `InfoGANModel`. Instead, was %s.' % type(model))
  # Verify configuration for auxiliary condition loss (ACGAN).
if ((_use_aux_loss(aux_cond_generator_weight) or
_use_aux_loss(aux_cond_discriminator_weight)) and
not isinstance(model, namedtuples.ACGANModel)):
raise ValueError(
'When `aux_cond_generator_weight` or `aux_cond_discriminator_weight` '
'is provided, `model` must be an `ACGANModel`. Instead, was %s.' %
type(model))
# Optionally create pooled model.
pooled_model = (
_tensor_pool_adjusted_model(model, tensor_pool_fn)
if tensor_pool_fn else model)
# Create standard losses.
gen_loss = generator_loss_fn(model, add_summaries=add_summaries)
dis_loss = discriminator_loss_fn(pooled_model, add_summaries=add_summaries)
# Add optional extra losses.
if _use_aux_loss(gradient_penalty_weight):
gp_loss = tfgan_losses.wasserstein_gradient_penalty(
pooled_model,
epsilon=gradient_penalty_epsilon,
target=gradient_penalty_target,
one_sided=gradient_penalty_one_sided,
add_summaries=add_summaries)
dis_loss += gradient_penalty_weight * gp_loss
if _use_aux_loss(mutual_information_penalty_weight):
gen_info_loss = tfgan_losses.mutual_information_penalty(
model, add_summaries=add_summaries)
dis_info_loss = (
gen_info_loss
if tensor_pool_fn is None else tfgan_losses.mutual_information_penalty(
pooled_model, add_summaries=add_summaries))
gen_loss += mutual_information_penalty_weight * gen_info_loss
dis_loss += mutual_information_penalty_weight * dis_info_loss
if _use_aux_loss(aux_cond_generator_weight):
ac_gen_loss = tfgan_losses.acgan_generator_loss(
model, add_summaries=add_summaries)
gen_loss += aux_cond_generator_weight * ac_gen_loss
if _use_aux_loss(aux_cond_discriminator_weight):
ac_disc_loss = tfgan_losses.acgan_discriminator_loss(
pooled_model, add_summaries=add_summaries)
dis_loss += aux_cond_discriminator_weight * ac_disc_loss
# Gathers auxiliary losses.
if model.generator_scope:
gen_reg_loss = losses.get_regularization_loss(model.generator_scope.name)
else:
gen_reg_loss = 0
if model.discriminator_scope:
dis_reg_loss = losses.get_regularization_loss(
model.discriminator_scope.name)
else:
dis_reg_loss = 0
return namedtuples.GANLoss(gen_loss + gen_reg_loss, dis_loss + dis_reg_loss)
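# Example (sketch): the typical pairing of a GAN model with `gan_loss`. The
# model construction is assumed to come from `gan_model` defined earlier in
# this module; `images` and `noise` are illustrative inputs.
#
#   model = gan_model(generator_fn, discriminator_fn,
#                     real_data=images, generator_inputs=noise)
#   loss = gan_loss(model, gradient_penalty_weight=10.0)  # WGAN-GP setup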
def cyclegan_loss(
model,
# Loss functions.
generator_loss_fn=tfgan_losses.least_squares_generator_loss,
discriminator_loss_fn=tfgan_losses.least_squares_discriminator_loss,
# Auxiliary losses.
cycle_consistency_loss_fn=tfgan_losses.cycle_consistency_loss,
cycle_consistency_loss_weight=10.0,
# Options
**kwargs):
"""Returns the losses for a `CycleGANModel`.
See https://arxiv.org/abs/1703.10593 for more details.
Args:
model: A `CycleGANModel` namedtuple.
generator_loss_fn: The loss function on the generator. Takes a `GANModel`
named tuple.
discriminator_loss_fn: The loss function on the discriminator. Takes a
`GANModel` namedtuple.
cycle_consistency_loss_fn: The cycle consistency loss function. Takes a
`CycleGANModel` namedtuple.
cycle_consistency_loss_weight: A non-negative Python number or a scalar
`Tensor` indicating how much to weigh the cycle consistency loss.
**kwargs: Keyword args to pass directly to `gan_loss` to construct the loss
for each partial model of `model`.
Returns:
A `CycleGANLoss` namedtuple.
Raises:
ValueError: If `model` is not a `CycleGANModel` namedtuple.
"""
# Sanity checks.
if not isinstance(model, namedtuples.CycleGANModel):
raise ValueError(
'`model` must be a `CycleGANModel`. Instead, was %s.' % type(model))
# Defines cycle consistency loss.
cycle_consistency_loss = cycle_consistency_loss_fn(
model, add_summaries=kwargs.get('add_summaries', True))
cycle_consistency_loss_weight = _validate_aux_loss_weight(
cycle_consistency_loss_weight, 'cycle_consistency_loss_weight')
aux_loss = cycle_consistency_loss_weight * cycle_consistency_loss
# Defines losses for each partial model.
def _partial_loss(partial_model):
partial_loss = gan_loss(
partial_model,
generator_loss_fn=generator_loss_fn,
discriminator_loss_fn=discriminator_loss_fn,
**kwargs)
return partial_loss._replace(generator_loss=partial_loss.generator_loss +
aux_loss)
with ops.name_scope('cyclegan_loss_x2y'):
loss_x2y = _partial_loss(model.model_x2y)
with ops.name_scope('cyclegan_loss_y2x'):
loss_y2x = _partial_loss(model.model_y2x)
return namedtuples.CycleGANLoss(loss_x2y, loss_y2x)
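# Example (sketch): a CycleGAN loss built on top of `cyclegan_model` (defined
# earlier in this module). `images_x` and `images_y` are assumed batches from
# the two domains.
#
#   model = cyclegan_model(generator_fn, discriminator_fn, images_x, images_y)
#   loss = cyclegan_loss(model, cycle_consistency_loss_weight=10.0)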
def stargan_loss(
model,
generator_loss_fn=tfgan_losses.stargan_generator_loss_wrapper(
tfgan_losses_impl.wasserstein_generator_loss),
discriminator_loss_fn=tfgan_losses.stargan_discriminator_loss_wrapper(
tfgan_losses_impl.wasserstein_discriminator_loss),
gradient_penalty_weight=10.0,
gradient_penalty_epsilon=1e-10,
gradient_penalty_target=1.0,
gradient_penalty_one_sided=False,
reconstruction_loss_fn=losses.absolute_difference,
reconstruction_loss_weight=10.0,
classification_loss_fn=losses.softmax_cross_entropy,
classification_loss_weight=1.0,
classification_one_hot=True,
add_summaries=True):
"""StarGAN Loss.
  The four major parts can be found here: http://screen/tMRMBAohDYG.
Args:
model: (StarGAN) Model output of the stargan_model() function call.
generator_loss_fn: The loss function on the generator. Takes a
`StarGANModel` named tuple.
discriminator_loss_fn: The loss function on the discriminator. Takes a
`StarGANModel` namedtuple.
    gradient_penalty_weight: (float) Gradient penalty weight. Defaults to 10
      per the original paper https://arxiv.org/abs/1711.09020. Set to 0 or
      None to turn off gradient penalty.
gradient_penalty_epsilon: (float) A small positive number added for
numerical stability when computing the gradient norm.
gradient_penalty_target: (float, or tf.float `Tensor`) The target value of
gradient norm. Defaults to 1.0.
gradient_penalty_one_sided: (bool) If `True`, penalty proposed in
https://arxiv.org/abs/1709.08894 is used. Defaults to `False`.
    reconstruction_loss_fn: The reconstruction loss function. Defaults to the
      L1 norm; the function must conform to the `tf.losses` API.
    reconstruction_loss_weight: Reconstruction loss weight. Defaults to 10.0.
    classification_loss_fn: The loss function on the discriminator's ability
      to classify the domain of the input. Defaults to one-hot softmax cross
      entropy loss, and the function must conform to the `tf.losses` API.
    classification_loss_weight: (float) Classification loss weight. Defaults
      to 1.0.
    classification_one_hot: (bool) Whether the label is a one-hot
      representation. Defaults to True. If False, `classification_loss_fn`
      needs to be a sigmoid cross entropy loss instead.
    add_summaries: (bool) Whether to add the losses to the summary.
Returns:
    A GANLoss namedtuple containing the generator and discriminator losses.
Raises:
    ValueError: If `StarGANModel.input_data_domain_label` does not have rank
      2, or if its second dimension is not defined.
"""
def _classification_loss_helper(true_labels, predict_logits, scope_name):
"""Classification Loss Function Helper.
Args:
      true_labels: Tensor of shape [batch_size, num_domains] representing the
        label, where each row is a one-hot vector.
      predict_logits: Tensor of shape [batch_size, num_domains] representing
        the predicted label logits, i.e. the UNSCALED output of the network.
scope_name: (string) Name scope of the loss component.
Returns:
Single scalar tensor representing the classification loss.
"""
with ops.name_scope(scope_name, values=(true_labels, predict_logits)):
loss = classification_loss_fn(
onehot_labels=true_labels, logits=predict_logits)
if not classification_one_hot:
loss = math_ops.reduce_sum(loss, axis=1)
loss = math_ops.reduce_mean(loss)
if add_summaries:
summary.scalar(scope_name, loss)
return loss
# Check input shape.
model.input_data_domain_label.shape.assert_has_rank(2)
model.input_data_domain_label.shape[1:].assert_is_fully_defined()
# Adversarial Loss.
generator_loss = generator_loss_fn(model, add_summaries=add_summaries)
discriminator_loss = discriminator_loss_fn(model, add_summaries=add_summaries)
# Gradient Penalty.
if _use_aux_loss(gradient_penalty_weight):
gradient_penalty_fn = tfgan_losses.stargan_gradient_penalty_wrapper(
tfgan_losses_impl.wasserstein_gradient_penalty)
discriminator_loss += gradient_penalty_fn(
model,
epsilon=gradient_penalty_epsilon,
target=gradient_penalty_target,
one_sided=gradient_penalty_one_sided,
add_summaries=add_summaries) * gradient_penalty_weight
# Reconstruction Loss.
reconstruction_loss = reconstruction_loss_fn(model.input_data,
model.reconstructed_data)
generator_loss += reconstruction_loss * reconstruction_loss_weight
if add_summaries:
summary.scalar('reconstruction_loss', reconstruction_loss)
# Classification Loss.
generator_loss += _classification_loss_helper(
true_labels=model.generated_data_domain_target,
predict_logits=model.discriminator_generated_data_domain_predication,
scope_name='generator_classification_loss') * classification_loss_weight
discriminator_loss += _classification_loss_helper(
true_labels=model.input_data_domain_label,
predict_logits=model.discriminator_input_data_domain_predication,
scope_name='discriminator_classification_loss'
) * classification_loss_weight
return namedtuples.GANLoss(generator_loss, discriminator_loss)
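# Example (sketch): combining `stargan_model` and `stargan_loss` with the
# paper defaults (Wasserstein losses plus gradient penalty). `images` and
# `labels` are assumed inputs.
#
#   model = stargan_model(generator_fn, discriminator_fn, images, labels)
#   loss = stargan_loss(model)  # gradient_penalty_weight defaults to 10.0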
def _get_update_ops(kwargs, gen_scope, dis_scope, check_for_unused_ops=True):
"""Gets generator and discriminator update ops.
Args:
kwargs: A dictionary of kwargs to be passed to `create_train_op`.
`update_ops` is removed, if present.
gen_scope: A scope for the generator.
dis_scope: A scope for the discriminator.
check_for_unused_ops: A Python bool. If `True`, throw Exception if there are
unused update ops.
Returns:
    A 2-tuple of (generator update ops, discriminator update ops).
Raises:
ValueError: If there are update ops outside of the generator or
discriminator scopes.
"""
if 'update_ops' in kwargs:
update_ops = set(kwargs['update_ops'])
del kwargs['update_ops']
else:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
all_gen_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS, gen_scope))
all_dis_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS, dis_scope))
if check_for_unused_ops:
unused_ops = update_ops - all_gen_ops - all_dis_ops
if unused_ops:
raise ValueError('There are unused update ops: %s' % unused_ops)
gen_update_ops = list(all_gen_ops & update_ops)
dis_update_ops = list(all_dis_ops & update_ops)
return gen_update_ops, dis_update_ops
def gan_train_ops(
model,
loss,
generator_optimizer,
discriminator_optimizer,
check_for_unused_update_ops=True,
is_chief=True,
# Optional args to pass directly to the `create_train_op`.
**kwargs):
"""Returns GAN train ops.
The highest-level call in TFGAN. It is composed of functions that can also
be called, should a user require more control over some part of the GAN
training process.
Args:
model: A GANModel.
loss: A GANLoss.
generator_optimizer: The optimizer for generator updates.
discriminator_optimizer: The optimizer for the discriminator updates.
check_for_unused_update_ops: If `True`, throws an exception if there are
update ops outside of the generator or discriminator scopes.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
**kwargs: Keyword args to pass directly to
`training.create_train_op` for both the generator and
discriminator train op.
Returns:
A GANTrainOps tuple of (generator_train_op, discriminator_train_op) that can
be used to train a generator/discriminator pair.
"""
if isinstance(model, namedtuples.CycleGANModel):
    # Get and store all arguments other than `model` and `loss` from locals.
    # The contents of locals() should not be modified, and modifying it may
    # not affect values, so make a copy.
    # https://docs.python.org/2/library/functions.html#locals.
saved_params = dict(locals())
saved_params.pop('model', None)
saved_params.pop('loss', None)
kwargs = saved_params.pop('kwargs', {})
saved_params.update(kwargs)
with ops.name_scope('cyclegan_x2y_train'):
train_ops_x2y = gan_train_ops(model.model_x2y, loss.loss_x2y,
**saved_params)
with ops.name_scope('cyclegan_y2x_train'):
train_ops_y2x = gan_train_ops(model.model_y2x, loss.loss_y2x,
**saved_params)
return namedtuples.GANTrainOps(
(train_ops_x2y.generator_train_op, train_ops_y2x.generator_train_op),
(train_ops_x2y.discriminator_train_op,
train_ops_y2x.discriminator_train_op),
training_util.get_or_create_global_step().assign_add(1))
# Create global step increment op.
global_step = training_util.get_or_create_global_step()
global_step_inc = global_step.assign_add(1)
# Get generator and discriminator update ops. We split them so that update
# ops aren't accidentally run multiple times. For now, throw an error if
# there are update ops that aren't associated with either the generator or
# the discriminator. Might modify the `kwargs` dictionary.
gen_update_ops, dis_update_ops = _get_update_ops(
kwargs, model.generator_scope.name, model.discriminator_scope.name,
check_for_unused_update_ops)
# Get the sync hooks if these are needed.
sync_hooks = []
generator_global_step = None
if isinstance(generator_optimizer,
sync_replicas_optimizer.SyncReplicasOptimizer):
# TODO(joelshor): Figure out a way to get this work without including the
# dummy global step in the checkpoint.
# WARNING: Making this variable a local variable causes sync replicas to
# hang forever.
generator_global_step = variable_scope.get_variable(
'dummy_global_step_generator',
shape=[],
dtype=global_step.dtype.base_dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
gen_update_ops += [generator_global_step.assign(global_step)]
sync_hooks.append(generator_optimizer.make_session_run_hook(is_chief))
with ops.name_scope('generator_train'):
gen_train_op = training.create_train_op(
total_loss=loss.generator_loss,
optimizer=generator_optimizer,
variables_to_train=model.generator_variables,
global_step=generator_global_step,
update_ops=gen_update_ops,
**kwargs)
discriminator_global_step = None
if isinstance(discriminator_optimizer,
sync_replicas_optimizer.SyncReplicasOptimizer):
# See comment above `generator_global_step`.
discriminator_global_step = variable_scope.get_variable(
'dummy_global_step_discriminator',
shape=[],
dtype=global_step.dtype.base_dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=[ops.GraphKeys.GLOBAL_VARIABLES])
dis_update_ops += [discriminator_global_step.assign(global_step)]
sync_hooks.append(discriminator_optimizer.make_session_run_hook(is_chief))
with ops.name_scope('discriminator_train'):
disc_train_op = training.create_train_op(
total_loss=loss.discriminator_loss,
optimizer=discriminator_optimizer,
variables_to_train=model.discriminator_variables,
global_step=discriminator_global_step,
update_ops=dis_update_ops,
**kwargs)
return namedtuples.GANTrainOps(gen_train_op, disc_train_op, global_step_inc,
sync_hooks)
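# Example (sketch): separate optimizers per network, as is conventional for
# GANs. The learning rates here are arbitrary choices for illustration.
#
#   gen_opt = tf.train.AdamOptimizer(1e-4, beta1=0.5)
#   dis_opt = tf.train.AdamOptimizer(4e-4, beta1=0.5)
#   train_ops = gan_train_ops(model, loss, gen_opt, dis_opt)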
# TODO(joelshor): Implement a dynamic GAN train loop, as in `Real-Time Adaptive
# Image Compression` (https://arxiv.org/abs/1705.05823)
class RunTrainOpsHook(session_run_hook.SessionRunHook):
"""A hook to run train ops a fixed number of times."""
def __init__(self, train_ops, train_steps):
"""Run train ops a certain number of times.
Args:
train_ops: A train op or iterable of train ops to run.
train_steps: The number of times to run the op(s).
"""
if not isinstance(train_ops, (list, tuple)):
train_ops = [train_ops]
self._train_ops = train_ops
self._train_steps = train_steps
def before_run(self, run_context):
for _ in range(self._train_steps):
run_context.session.run(self._train_ops)
def get_sequential_train_hooks(train_steps=namedtuples.GANTrainSteps(1, 1)):
"""Returns a hooks function for sequential GAN training.
Args:
train_steps: A `GANTrainSteps` tuple that determines how many generator
and discriminator training steps to take.
Returns:
A function that takes a GANTrainOps tuple and returns a list of hooks.
"""
def get_hooks(train_ops):
generator_hook = RunTrainOpsHook(train_ops.generator_train_op,
train_steps.generator_train_steps)
discriminator_hook = RunTrainOpsHook(train_ops.discriminator_train_op,
train_steps.discriminator_train_steps)
return [generator_hook, discriminator_hook] + list(train_ops.train_hooks)
return get_hooks
def _num_joint_steps(train_steps):
g_steps = train_steps.generator_train_steps
d_steps = train_steps.discriminator_train_steps
# Get the number of each type of step that should be run.
num_d_and_g_steps = min(g_steps, d_steps)
num_g_steps = g_steps - num_d_and_g_steps
num_d_steps = d_steps - num_d_and_g_steps
return num_d_and_g_steps, num_g_steps, num_d_steps
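# For example, `_num_joint_steps(namedtuples.GANTrainSteps(3, 5))` returns
# (3, 0, 2): three joint steps, no generator-only steps, and two
# discriminator-only steps.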
def get_joint_train_hooks(train_steps=namedtuples.GANTrainSteps(1, 1)):
"""Returns a hooks function for joint GAN training.
When using these train hooks, IT IS RECOMMENDED TO USE `use_locking=True` ON
ALL OPTIMIZERS TO AVOID RACE CONDITIONS.
The order of steps taken is:
1) Combined generator and discriminator steps
2) Generator only steps, if any remain
3) Discriminator only steps, if any remain
**NOTE**: Unlike `get_sequential_train_hooks`, this method performs updates
for the generator and discriminator simultaneously whenever possible. This
reduces the number of `tf.Session` calls, and can also change the training
semantics.
  To illustrate the difference, look at the following example:
`train_steps=namedtuples.GANTrainSteps(3, 5)` will cause
`get_sequential_train_hooks` to make 8 session calls:
1) 3 generator steps
2) 5 discriminator steps
  In contrast, `get_joint_train_hooks` will make 5 session calls:
1) 3 generator + discriminator steps
2) 2 discriminator steps
Args:
train_steps: A `GANTrainSteps` tuple that determines how many generator
and discriminator training steps to take.
Returns:
A function that takes a GANTrainOps tuple and returns a list of hooks.
"""
num_d_and_g_steps, num_g_steps, num_d_steps = _num_joint_steps(train_steps)
def get_hooks(train_ops):
g_op = train_ops.generator_train_op
d_op = train_ops.discriminator_train_op
joint_hook = RunTrainOpsHook([g_op, d_op], num_d_and_g_steps)
g_hook = RunTrainOpsHook(g_op, num_g_steps)
d_hook = RunTrainOpsHook(d_op, num_d_steps)
return [joint_hook, g_hook, d_hook] + list(train_ops.train_hooks)
return get_hooks
# TODO(joelshor): This function currently returns the global step. Find a
# good way for it to return the generator, discriminator, and final losses.
def gan_train(train_ops,
logdir,
get_hooks_fn=get_sequential_train_hooks(),
master='',
is_chief=True,
scaffold=None,
hooks=None,
chief_only_hooks=None,
save_checkpoint_secs=600,
save_summaries_steps=100,
config=None):
"""A wrapper around `contrib.training.train` that uses GAN hooks.
Args:
train_ops: A GANTrainOps named tuple.
logdir: The directory where the graph and checkpoints are saved.
get_hooks_fn: A function that takes a GANTrainOps tuple and returns a list
of hooks.
master: The URL of the master.
is_chief: Specifies whether or not the training is being run by the primary
replica during replica training.
    scaffold: A tf.train.Scaffold instance.
hooks: List of `tf.train.SessionRunHook` callbacks which are run inside the
training loop.
chief_only_hooks: List of `tf.train.SessionRunHook` instances which are run
inside the training loop for the chief trainer only.
save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved
using a default checkpoint saver. If `save_checkpoint_secs` is set to
`None`, then the default checkpoint saver isn't used.
save_summaries_steps: The frequency, in number of global steps, that the
summaries are written to disk using a default summary saver. If
`save_summaries_steps` is set to `None`, then the default summary saver
isn't used.
config: An instance of `tf.ConfigProto`.
Returns:
Output of the call to `training.train`.
"""
new_hooks = get_hooks_fn(train_ops)
if hooks is not None:
hooks = list(hooks) + list(new_hooks)
else:
hooks = new_hooks
return training.train(
train_ops.global_step_inc_op,
logdir,
master=master,
is_chief=is_chief,
scaffold=scaffold,
hooks=hooks,
chief_only_hooks=chief_only_hooks,
save_checkpoint_secs=save_checkpoint_secs,
save_summaries_steps=save_summaries_steps,
config=config)
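# Example (sketch): the usual end-to-end call. With the default
# `get_sequential_train_hooks()`, every global step runs one generator and
# one discriminator update. The log directory and stopping hook are
# illustrative.
#
#   gan_train(train_ops, logdir='/tmp/gan_logs',
#             hooks=[tf.train.StopAtStepHook(num_steps=10000)])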
def get_sequential_train_steps(train_steps=namedtuples.GANTrainSteps(1, 1)):
"""Returns a thin wrapper around slim.learning.train_step, for GANs.
  This function exists to provide support for the Supervisor. For new code, please
use `MonitoredSession` and `get_sequential_train_hooks`.
Args:
train_steps: A `GANTrainSteps` tuple that determines how many generator
and discriminator training steps to take.
Returns:
A function that can be used for `train_step_fn` for GANs.
"""
def sequential_train_steps(sess, train_ops, global_step, train_step_kwargs):
"""A thin wrapper around slim.learning.train_step, for GANs.
Args:
      sess: A TensorFlow session.
train_ops: A GANTrainOps tuple of train ops to run.
global_step: The global step.
train_step_kwargs: Dictionary controlling `train_step` behavior.
Returns:
A scalar final loss and a bool whether or not the train loop should stop.
"""
# Only run `should_stop` at the end, if required. Make a local copy of
# `train_step_kwargs`, if necessary, so as not to modify the caller's
# dictionary.
should_stop_op, train_kwargs = None, train_step_kwargs
if 'should_stop' in train_step_kwargs:
should_stop_op = train_step_kwargs['should_stop']
train_kwargs = train_step_kwargs.copy()
del train_kwargs['should_stop']
# Run generator training steps.
gen_loss = 0
for _ in range(train_steps.generator_train_steps):
cur_gen_loss, _ = slim_learning.train_step(
sess, train_ops.generator_train_op, global_step, train_kwargs)
gen_loss += cur_gen_loss
# Run discriminator training steps.
dis_loss = 0
for _ in range(train_steps.discriminator_train_steps):
cur_dis_loss, _ = slim_learning.train_step(
sess, train_ops.discriminator_train_op, global_step, train_kwargs)
dis_loss += cur_dis_loss
sess.run(train_ops.global_step_inc_op)
# Run the `should_stop` op after the global step has been incremented, so
# that the `should_stop` aligns with the proper `global_step` count.
if should_stop_op is not None:
should_stop = sess.run(should_stop_op)
else:
should_stop = False
return gen_loss + dis_loss, should_stop
return sequential_train_steps
# Helpers
def _convert_tensor_or_l_or_d(tensor_or_l_or_d):
"""Convert input, list of inputs, or dictionary of inputs to Tensors."""
if isinstance(tensor_or_l_or_d, (list, tuple)):
return [ops.convert_to_tensor(x) for x in tensor_or_l_or_d]
elif isinstance(tensor_or_l_or_d, dict):
return {k: ops.convert_to_tensor(v) for k, v in tensor_or_l_or_d.items()}
else:
return ops.convert_to_tensor(tensor_or_l_or_d)
def _validate_distributions(distributions_l, noise_l):
if not isinstance(distributions_l, (tuple, list)):
raise ValueError('`predicted_distributions` must be a list. Instead, found '
'%s.' % type(distributions_l))
if len(distributions_l) != len(noise_l):
raise ValueError('Length of `predicted_distributions` %i must be the same '
'as the length of structured noise %i.' %
(len(distributions_l), len(noise_l)))
def _validate_acgan_discriminator_outputs(discriminator_output):
try:
a, b = discriminator_output
except (TypeError, ValueError):
raise TypeError(
'A discriminator function for ACGAN must output a tuple '
'consisting of (discrimination logits, classification logits).')
return a, b
def _generate_stargan_random_domain_target(batch_size, num_domains):
"""Generate random domain label.
Args:
    batch_size: (int) Number of random domain labels to generate.
    num_domains: (int) Number of domains represented by the label.
  Returns:
    Tensor of shape (batch_size, num_domains) representing random one-hot
    domain labels.
"""
domain_idx = random_ops.random_uniform(
[batch_size], minval=0, maxval=num_domains, dtype=dtypes.int32)
return array_ops.one_hot(domain_idx, num_domains)
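# For example, with batch_size=2 and num_domains=3 this might return
# [[0., 1., 0.], [1., 0., 0.]]: one uniformly sampled one-hot row per example.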
# -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
iteritems
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
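# For example, escape_tex('a_b & c', 'PY') returns r'a\PYZus{}b \PYZam{} c',
# which can sit inside the Verbatim environment without confusing LaTeX.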
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
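# For example, Token.Name.Function maps directly to its short form 'nf'. A
# subtype missing from STANDARD_TYPES, say a hypothetical
# Token.Name.Function.Custom, falls back to its parent and becomes 'nfCustom'.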
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
.. versionadded:: 0.7
.. versionchanged:: 0.10
The default is now ``'PY'`` instead of ``'C'``.
`texcomments`
        If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``).
.. versionadded:: 1.2
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``).
.. versionadded:: 1.2
`escapeinside`
If set to a string of length 2, enables escaping to LaTeX. Text
delimited by these 2 characters is read as LaTeX code and
typeset accordingly. It has no effect in string literals. It has
no effect in comments if `texcomments` or `mathescape` is
set. (default: ``''``).
.. versionadded:: 2.0
`envname`
Allows you to pick an alternative environment name replacing Verbatim.
The alternate environment still has to support Verbatim's option syntax.
(default: ``'Verbatim'``).
.. versionadded:: 2.0
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self.escapeinside = options.get('escapeinside', '')
if len(self.escapeinside) == 2:
self.left = self.escapeinside[0]
self.right = self.escapeinside[1]
else:
self.escapeinside = ''
self.envname = options.get('envname', u'Verbatim')
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
                cmndef += r'\let\$$@ff=\texttt'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in iteritems(self.cmd2def):
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(u'\\begin{' + self.envname + u'}[commandchars=\\\\\\{\\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(u',numbers=left' +
(start and u',firstnumber=%d' % start or u'') +
(step and u',stepnumber=%d' % step or u''))
if self.mathescape or self.texcomments or self.escapeinside:
outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
if self.verboptions:
outfile.write(u',' + self.verboptions)
outfile.write(u']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in xrange(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, cp)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, cp)
in_math = not in_math
value = '$'.join(parts)
elif self.escapeinside:
text = value
value = ''
while text:
a, sep1, text = text.partition(self.left)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
value += escape_tex(a, cp) + b
else:
value += escape_tex(a + sep1 + b, cp)
else:
value += escape_tex(a, cp)
else:
value = escape_tex(value, cp)
elif ttype not in Token.Escape:
value = escape_tex(value, cp)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write(u'\\end{' + self.envname + u'}\n')
if self.full:
encoding = self.encoding or 'utf8'
            # map known existing encodings from LaTeX distribution
encoding = {
'utf_8': 'utf8',
'latin_1': 'latin1',
'iso_8859_1': 'latin1',
}.get(encoding.replace('-', '_'), encoding)
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = encoding,
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
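# Example (sketch): rendering highlighted source with this formatter; the
# lexer choice is arbitrary.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#
#   tex = highlight('def foo(bar):\n    pass\n', PythonLexer(),
#                   LatexFormatter(full=True, title='Demo'))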
class LatexEmbeddedLexer(Lexer):
"""
This lexer takes one lexer as argument, the lexer for the language
being formatted, and the left and right delimiters for escaped text.
First everything is scanned using the language lexer to obtain
strings and comments. All other consecutive tokens are merged and
the resulting text is scanned for escaped segments, which are given
the Token.Escape type. Finally text that is not escaped is scanned
again with the language lexer.
"""
def __init__(self, left, right, lang, **options):
self.left = left
self.right = right
self.lang = lang
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buf = ''
idx = 0
for i, t, v in self.lang.get_tokens_unprocessed(text):
if t in Token.Comment or t in Token.String:
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
buf = ''
yield i, t, v
else:
if not buf:
idx = i
buf += v
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
def get_tokens_aux(self, index, text):
while text:
a, sep1, text = text.partition(self.left)
if a:
for i, t, v in self.lang.get_tokens_unprocessed(a):
yield index + i, t, v
index += len(a)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
yield index + len(sep1), Token.Escape, b
index += len(sep1) + len(b) + len(sep2)
else:
yield index, Token.Error, sep1
index += len(sep1)
text = b
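# Example (sketch): wrapping a language lexer so that text between the two
# delimiters is emitted as Token.Escape and typeset as raw LaTeX. The '|'
# delimiters are an arbitrary choice.
#
#   from pygments.lexers import PythonLexer
#
#   lexer = LatexEmbeddedLexer('|', '|', PythonLexer())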
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import inspect
import os
import pickle
import re
import sys
import types
import warnings
from inspect import signature
from tempfile import TemporaryDirectory
from textwrap import dedent
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar, Union, cast
import dill
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.models.dag import DAG, DagContext
from airflow.models.skipmixin import SkipMixin
from airflow.models.taskinstance import _CURRENT_CONTEXT
from airflow.models.xcom_arg import XComArg
from airflow.utils.decorators import apply_defaults
from airflow.utils.operator_helpers import determine_kwargs
from airflow.utils.process_utils import execute_in_subprocess
from airflow.utils.python_virtualenv import prepare_virtualenv, write_python_script
from airflow.utils.task_group import TaskGroup, TaskGroupContext
class PythonOperator(BaseOperator):
"""
Executes a Python callable
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PythonOperator`
:param python_callable: A reference to an object that is callable
:type python_callable: python callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function
:type op_kwargs: dict (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable
:type op_args: list (templated)
:param templates_dict: a dictionary where the values are templates that
will get templated by the Airflow engine sometime between
        ``__init__`` and ``execute``, and are made available
in your callable's context after the template has been applied. (templated)
:type templates_dict: dict[str]
:param templates_exts: a list of file extensions to resolve while
processing templated fields, for examples ``['.sql', '.hql']``
:type templates_exts: list[str]
"""
template_fields = ('templates_dict', 'op_args', 'op_kwargs')
template_fields_renderers = {"templates_dict": "json", "op_args": "py", "op_kwargs": "py"}
ui_color = '#ffefeb'
# since we won't mutate the arguments, we should just do the shallow copy
# there are some cases we can't deepcopy the objects(e.g protobuf).
shallow_copy_attrs = (
'python_callable',
'op_kwargs',
)
@apply_defaults
def __init__(
self,
*,
python_callable: Callable,
op_args: Optional[List] = None,
op_kwargs: Optional[Dict] = None,
templates_dict: Optional[Dict] = None,
templates_exts: Optional[List[str]] = None,
**kwargs,
) -> None:
if kwargs.get("provide_context"):
warnings.warn(
"provide_context is deprecated as of 2.0 and is no longer required",
DeprecationWarning,
stacklevel=2,
)
kwargs.pop('provide_context', None)
super().__init__(**kwargs)
if not callable(python_callable):
raise AirflowException('`python_callable` param must be callable')
self.python_callable = python_callable
self.op_args = op_args or []
self.op_kwargs = op_kwargs or {}
self.templates_dict = templates_dict
if templates_exts:
self.template_ext = templates_exts
def execute(self, context: Dict):
context.update(self.op_kwargs)
context['templates_dict'] = self.templates_dict
self.op_kwargs = determine_kwargs(self.python_callable, self.op_args, context)
return_value = self.execute_callable()
self.log.info("Done. Returned value was: %s", return_value)
return return_value
def execute_callable(self):
"""
Calls the python callable with the given arguments.
:return: the return value of the call.
:rtype: any
"""
return self.python_callable(*self.op_args, **self.op_kwargs)
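# Example (sketch): a minimal PythonOperator. The `dag` object is assumed to
# exist; under Airflow 2.0 the context is passed automatically, so
# `provide_context` is no longer needed.
#
#   def print_context(ds, **kwargs):
#       print(ds)
#
#   run_this = PythonOperator(
#       task_id='print_the_context',
#       python_callable=print_context,
#       dag=dag,
#   )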
class _PythonDecoratedOperator(BaseOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:type python_callable: python callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:type op_kwargs: dict
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:type op_args: list
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. Dict will unroll to xcom values with keys as keys.
Defaults to False.
:type multiple_outputs: bool
"""
template_fields = ('op_args', 'op_kwargs')
template_fields_renderers = {"op_args": "py", "op_kwargs": "py"}
ui_color = PythonOperator.ui_color
# since we won't mutate the arguments, we should just do the shallow copy
# there are some cases we can't deepcopy the objects (e.g protobuf).
shallow_copy_attrs = ('python_callable',)
@apply_defaults
def __init__(
self,
*,
python_callable: Callable,
task_id: str,
op_args: Tuple[Any],
op_kwargs: Dict[str, Any],
multiple_outputs: bool = False,
**kwargs,
) -> None:
kwargs['task_id'] = self._get_unique_task_id(task_id, kwargs.get('dag'), kwargs.get('task_group'))
super().__init__(**kwargs)
self.python_callable = python_callable
        # Check that the arguments can be bound
signature(python_callable).bind(*op_args, **op_kwargs)
self.multiple_outputs = multiple_outputs
self.op_args = op_args
self.op_kwargs = op_kwargs
@staticmethod
def _get_unique_task_id(
task_id: str, dag: Optional[DAG] = None, task_group: Optional[TaskGroup] = None
) -> str:
"""
        Generate a unique task id given a DAG (or if run in a DAG context).
Ids are generated by appending a unique number to the end of
the original task id.
Example:
task_id
task_id__1
task_id__2
...
task_id__20
"""
dag = dag or DagContext.get_current_dag()
if not dag:
return task_id
# We need to check if we are in the context of TaskGroup as the task_id may
# already be altered
task_group = task_group or TaskGroupContext.get_current_task_group(dag)
tg_task_id = task_group.child_id(task_id) if task_group else task_id
if tg_task_id not in dag.task_ids:
return task_id
core = re.split(r'__\d+$', task_id)[0]
suffixes = sorted(
[
int(re.split(r'^.+__', task_id)[1])
for task_id in dag.task_ids
if re.match(rf'^{core}__\d+$', task_id)
]
)
if not suffixes:
return f'{core}__1'
return f'{core}__{suffixes[-1] + 1}'
@staticmethod
def validate_python_callable(python_callable):
"""
Validate that python callable can be wrapped by operator.
Raises exception if invalid.
:param python_callable: Python object to be validated
:raises: TypeError, AirflowException
"""
if not callable(python_callable):
raise TypeError('`python_callable` param must be callable')
if 'self' in signature(python_callable).parameters.keys():
raise AirflowException('@task does not support methods')
def execute(self, context: Dict):
return_value = self.python_callable(*self.op_args, **self.op_kwargs)
self.log.debug("Done. Returned value was: %s", return_value)
if not self.multiple_outputs:
return return_value
if isinstance(return_value, dict):
for key in return_value.keys():
if not isinstance(key, str):
raise AirflowException(
'Returned dictionary keys must be strings when using '
f'multiple_outputs, found {key} ({type(key)}) instead'
)
for key, value in return_value.items():
self.xcom_push(context, key, value)
else:
raise AirflowException(
                f'Returned output was type {type(return_value)}, '
                'expected dictionary for multiple_outputs'
)
return return_value
T = TypeVar("T", bound=Callable) # pylint: disable=invalid-name
def task(
python_callable: Optional[Callable] = None, multiple_outputs: Optional[bool] = None, **kwargs
) -> Callable[[T], T]:
"""
Python operator decorator. Wraps a function into an Airflow operator.
    Accepts kwargs to pass through to the operator. Can be reused in a single DAG.
:param python_callable: Function to decorate
:type python_callable: Optional[Callable]
:param multiple_outputs: if set, function return value will be
unrolled to multiple XCom values. List/Tuples will unroll to xcom values
with index as key. Dict will unroll to xcom values with keys as XCom keys.
Defaults to False.
:type multiple_outputs: bool
"""
# try to infer from type annotation
if python_callable and multiple_outputs is None:
sig = signature(python_callable).return_annotation
ttype = getattr(sig, "__origin__", None)
multiple_outputs = sig != inspect.Signature.empty and ttype in (dict, Dict)
def wrapper(f: T):
"""
        Python wrapper to generate a _PythonDecoratedOperator out of a simple
        python function. Used for the Airflow decorated-operator interface.
"""
_PythonDecoratedOperator.validate_python_callable(f)
kwargs.setdefault('task_id', f.__name__)
@functools.wraps(f)
def factory(*args, **f_kwargs):
op = _PythonDecoratedOperator(
python_callable=f,
op_args=args,
op_kwargs=f_kwargs,
multiple_outputs=multiple_outputs,
**kwargs,
)
if f.__doc__:
op.doc_md = f.__doc__
return XComArg(op)
return cast(T, factory)
if callable(python_callable):
return wrapper(python_callable)
elif python_callable is not None:
raise AirflowException('No args allowed while using @task, use kwargs instead')
return wrapper
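# Example (sketch): the decorator in use inside a DAG context. The Dict return
# annotation switches on `multiple_outputs` via the inference above; calling a
# decorated function returns an XComArg that wires the dependency.
#
#   @task
#   def extract() -> Dict[str, int]:
#       return {'a': 1, 'b': 2}
#
#   @task
#   def add_one(x: int):
#       return x + 1
#
#   add_one(extract()['a'])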
class BranchPythonOperator(PythonOperator, SkipMixin):
"""
Allows a workflow to "branch" or follow a path following the execution
of this task.
It derives the PythonOperator and expects a Python function that returns
a single task_id or list of task_ids to follow. The task_id(s) returned
should point to a task directly downstream from {self}. All other "branches"
or directly downstream tasks are marked with a state of ``skipped`` so that
these paths can't move forward. The ``skipped`` states are propagated
downstream to allow for the DAG state to fill up and the DAG run's state
to be inferred.
"""
def execute(self, context: Dict):
branch = super().execute(context)
self.skip_all_except(context['ti'], branch)
return branch
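# Example (sketch): branching on the task_id returned by the callable. The
# `use_path_a` flag, the downstream tasks, and `dag` are assumed to exist.
#
#   branch = BranchPythonOperator(
#       task_id='branching',
#       python_callable=lambda: 'path_a' if use_path_a else 'path_b',
#       dag=dag,
#   )
#   branch >> [path_a, path_b]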
class ShortCircuitOperator(PythonOperator, SkipMixin):
"""
Allows a workflow to continue only if a condition is met. Otherwise, the
workflow "short-circuits" and downstream tasks are skipped.
The ShortCircuitOperator is derived from the PythonOperator. It evaluates a
condition and short-circuits the workflow if the condition is False. Any
downstream tasks are marked with a state of "skipped". If the condition is
True, downstream tasks proceed as normal.
The condition is determined by the result of `python_callable`.
"""
def execute(self, context: Dict):
condition = super().execute(context)
self.log.info("Condition result is %s", condition)
if condition:
self.log.info('Proceeding with downstream tasks...')
return
self.log.info('Skipping downstream tasks...')
downstream_tasks = context['task'].get_flat_relatives(upstream=False)
self.log.debug("Downstream task_ids %s", downstream_tasks)
if downstream_tasks:
self.skip(context['dag_run'], context['ti'].execution_date, downstream_tasks)
self.log.info("Done.")
class PythonVirtualenvOperator(PythonOperator):
"""
Allows one to run a function in a virtualenv that is created and destroyed
automatically (with certain caveats).
The function must be defined using def, and not be
part of a class. All imports must happen inside the function
and no variables outside of the scope may be referenced. A global scope
variable named virtualenv_string_args will be available (populated by
    string_args). In addition, one can pass arguments through op_args and op_kwargs, and one
can use a return value.
Note that if your virtualenv runs in a different Python major version than Airflow,
you cannot use return values, op_args, op_kwargs, or use any macros that are being provided to
Airflow through plugins. You can use string_args though.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:PythonVirtualenvOperator`
:param python_callable: A python function with no references to outside variables,
defined with def, which will be run in a virtualenv
:type python_callable: function
:param requirements: A list of requirements as specified in a pip install command
:type requirements: list[str]
:param python_version: The Python version to run the virtualenv with. Note that
both 2 and 2.7 are acceptable forms.
:type python_version: Optional[Union[str, int, float]]
:param use_dill: Whether to use dill to serialize
the args and result (pickle is default). This allow more complex types
but requires you to include dill in your requirements.
:type use_dill: bool
:param system_site_packages: Whether to include
system_site_packages in your virtualenv.
See virtualenv documentation for more information.
:type system_site_packages: bool
:param op_args: A list of positional arguments to pass to python_callable.
:type op_args: list
:param op_kwargs: A dict of keyword arguments to pass to python_callable.
:type op_kwargs: dict
:param string_args: Strings that are present in the global var virtualenv_string_args,
available to python_callable at runtime as a list[str]. Note that args are split
by newline.
:type string_args: list[str]
    :param templates_dict: a dictionary where the values are templates that
        will get templated by the Airflow engine sometime between
        ``__init__`` and ``execute``, and are made available
        in your callable's context after the template has been applied
    :type templates_dict: dict of str
    :param templates_exts: a list of file extensions to resolve while
        processing templated fields, for example ``['.sql', '.hql']``
    :type templates_exts: list[str]
"""
BASE_SERIALIZABLE_CONTEXT_KEYS = {
'ds_nodash',
'inlets',
'next_ds',
'next_ds_nodash',
'outlets',
'params',
'prev_ds',
'prev_ds_nodash',
'run_id',
'task_instance_key_str',
'test_mode',
'tomorrow_ds',
'tomorrow_ds_nodash',
'ts',
'ts_nodash',
'ts_nodash_with_tz',
'yesterday_ds',
'yesterday_ds_nodash',
}
PENDULUM_SERIALIZABLE_CONTEXT_KEYS = {
'execution_date',
'next_execution_date',
'prev_execution_date',
'prev_execution_date_success',
'prev_start_date_success',
}
AIRFLOW_SERIALIZABLE_CONTEXT_KEYS = {'macros', 'conf', 'dag', 'dag_run', 'task'}
@apply_defaults
def __init__( # pylint: disable=too-many-arguments
self,
*,
python_callable: Callable,
requirements: Optional[Iterable[str]] = None,
python_version: Optional[Union[str, int, float]] = None,
use_dill: bool = False,
system_site_packages: bool = True,
op_args: Optional[List] = None,
op_kwargs: Optional[Dict] = None,
string_args: Optional[Iterable[str]] = None,
templates_dict: Optional[Dict] = None,
templates_exts: Optional[List[str]] = None,
**kwargs,
):
if (
not isinstance(python_callable, types.FunctionType)
or isinstance(python_callable, types.LambdaType)
and python_callable.__name__ == "<lambda>"
):
raise AirflowException('PythonVirtualenvOperator only supports functions for python_callable arg')
if (
python_version
and str(python_version)[0] != str(sys.version_info.major)
and (op_args or op_kwargs)
):
raise AirflowException(
"Passing op_args or op_kwargs is not supported across different Python "
"major versions for PythonVirtualenvOperator. Please use string_args."
)
super().__init__(
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
templates_dict=templates_dict,
templates_exts=templates_exts,
**kwargs,
)
self.requirements = list(requirements or [])
self.string_args = string_args or []
self.python_version = python_version
self.use_dill = use_dill
self.system_site_packages = system_site_packages
if not self.system_site_packages and self.use_dill and 'dill' not in self.requirements:
self.requirements.append('dill')
self.pickling_library = dill if self.use_dill else pickle
def execute(self, context: Dict):
serializable_context = {key: context[key] for key in self._get_serializable_context_keys()}
super().execute(context=serializable_context)
def execute_callable(self):
with TemporaryDirectory(prefix='venv') as tmp_dir:
if self.templates_dict:
self.op_kwargs['templates_dict'] = self.templates_dict
input_filename = os.path.join(tmp_dir, 'script.in')
output_filename = os.path.join(tmp_dir, 'script.out')
string_args_filename = os.path.join(tmp_dir, 'string_args.txt')
script_filename = os.path.join(tmp_dir, 'script.py')
prepare_virtualenv(
venv_directory=tmp_dir,
python_bin=f'python{self.python_version}' if self.python_version else None,
system_site_packages=self.system_site_packages,
requirements=self.requirements,
)
self._write_args(input_filename)
self._write_string_args(string_args_filename)
write_python_script(
jinja_context=dict(
op_args=self.op_args,
op_kwargs=self.op_kwargs,
pickling_library=self.pickling_library.__name__,
python_callable=self.python_callable.__name__,
python_callable_source=dedent(inspect.getsource(self.python_callable)),
),
filename=script_filename,
)
execute_in_subprocess(
cmd=[
f'{tmp_dir}/bin/python',
script_filename,
input_filename,
output_filename,
string_args_filename,
]
)
return self._read_result(output_filename)
def _write_args(self, filename):
if self.op_args or self.op_kwargs:
with open(filename, 'wb') as file:
self.pickling_library.dump({'args': self.op_args, 'kwargs': self.op_kwargs}, file)
def _get_serializable_context_keys(self):
def _is_airflow_env():
return self.system_site_packages or 'apache-airflow' in self.requirements
def _is_pendulum_env():
return 'pendulum' in self.requirements and 'lazy_object_proxy' in self.requirements
serializable_context_keys = self.BASE_SERIALIZABLE_CONTEXT_KEYS.copy()
if _is_airflow_env():
serializable_context_keys.update(self.AIRFLOW_SERIALIZABLE_CONTEXT_KEYS)
if _is_pendulum_env() or _is_airflow_env():
serializable_context_keys.update(self.PENDULUM_SERIALIZABLE_CONTEXT_KEYS)
return serializable_context_keys
def _write_string_args(self, filename):
with open(filename, 'w') as file:
file.write('\n'.join(map(str, self.string_args)))
def _read_result(self, filename):
if os.stat(filename).st_size == 0:
return None
with open(filename, 'rb') as file:
try:
return self.pickling_library.load(file)
except ValueError:
self.log.error(
"Error deserializing result. Note that result deserialization "
"is not supported across major Python versions."
)
raise
def get_current_context() -> Dict[str, Any]:
"""
    Obtain the execution context for the currently executing operator without
    altering the user method's signature.
This is the simplest method of retrieving the execution context dictionary.
**Old style:**
.. code:: python
def my_task(**context):
ti = context["ti"]
**New style:**
.. code:: python
from airflow.task.context import get_current_context
def my_task():
context = get_current_context()
ti = context["ti"]
    The current context will only have a value if this method is called after
    an operator has started to execute.
"""
if not _CURRENT_CONTEXT:
raise AirflowException(
"Current context was requested but no context was found! "
"Are you running within an airflow task?"
)
return _CURRENT_CONTEXT[-1]
| |
# IM - Infrastructure Manager
# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import subprocess
class AuthenticationParserError(Exception):
"""
Error while parsing a token.
Args:
- line(int, optional): line where the error occurred.
"""
def __init__(self, line=-1):
self.line = line
Exception.__init__(self, "Error while parsing a token at line %d." % line)
class Authentication:
"""
Authentication parser and storage of tokens.
A token is a dictionary with the keys:
- id: ID used to refer the token.
- type: service, one of InfrastructureManager, VMRC, OpenNebula, EC2, OpenStack, OCCI,
LibCloud and LibVirt.
- username: username in the service; in EC2 and OpenStack it is the *Access Key ID*.
- host(optional): access point of the provider
Args:
- auth_data(list or Authentication): list of tokens or instance of this class.
"""
def __init__(self, auth_data):
if isinstance(auth_data, Authentication):
self.auth_list = auth_data.auth_list
else:
self.auth_list = auth_data
def getAuthInfo(self, type):
"""Return a list of tokens with a type."""
return [ auth for auth in self.auth_list if auth['type'] == type ]
def getAuthInfoByID(self, id):
"""Return a list of tokens with a id."""
return [ auth for auth in self.auth_list if auth['id'] == id ]
def compare(self, other_auth, type):
"""Return true if this instance has some token of a type equal to the passed tokens."""
auth0 = other_auth.getAuthInfo(type)
for token in self.getAuthInfo(type):
for token0 in auth0:
if token == token0:
return True
return False
@staticmethod
def split_line(line):
"""
        Split a line using ';' as the separator char, treating single- or
        double-quoted spans as part of a token (in particular to enable
        using the char ';' inside a token).
"""
tokens = []
token = ""
        in_quotes = False
        in_dquotes = False
        for char in line:
            if char == '"' and not in_quotes:
                in_dquotes = not in_dquotes
            elif char == "'" and not in_dquotes:
                in_quotes = not in_quotes
            elif char == ";" and not in_quotes and not in_dquotes:
                tokens.append(token)
                token = ""
            else:
                token += char
# Add the last token
if token.strip() != "":
tokens.append(token)
return tokens
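    # Illustrative walk-through of the quoting behaviour above (hypothetical
    # input):
    #     Authentication.split_line("id = a; password = 'p;w'; host = h")
    # yields three tokens -- "id = a", " password = p;w" and " host = h" --
    # because the quoted ';' is kept inside the token while the quote chars
    # themselves are dropped.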
    @staticmethod
    def run_command(cmd):
        """Run a command and return its output."""
proc = subprocess.Popen(cmd.split(' '), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
outs, errs = proc.communicate()
if proc.returncode != 0:
if errs == b'':
errs = outs
raise Exception("Failed to get auth value using command %s: %s" % (cmd, errs.decode('utf-8')))
return outs.decode('utf-8').replace('\n', '')
@staticmethod
def read_auth_data(filename):
"""
Read a file to load the Authentication data.
The file has the following format:
id = one; type = OpenNebula; host = oneserver:2633; username = user; password = pass
type = InfrastructureManager; username = user; password = 'pass;test'
type = VMRC; host = http://server:8080/vmrc; username = user; password = "pass';test"
id = ec2; type = EC2; username = ACCESS_KEY; password = SECRET_KEY
id = oshost; type = OpenStack; host = oshost:8773; username = ACCESS_KEY; key = SECRET_KEY
id = occi; type = OCCI; host = occiserver:4567; username = user; password = file(/tmp/filename)
id = occi; type = OCCI; proxy = file(/tmp/proxy.pem)
type = InfrastructureManager; token = command(oidc-token OIDC_ACCOUNT)
Arguments:
- filename(str or list): The filename to read or list of auth lines
Returns: a list with all the auth data
"""
if isinstance(filename, list):
lines = filename
else:
auth_file = open(filename, 'r')
lines = auth_file.readlines()
auth_file.close()
res = []
i = 0
for line in lines:
i += 1
line = line.strip()
if len(line) > 0 and not line.startswith("#"):
auth = {}
tokens = Authentication.split_line(line)
for token in tokens:
key_value = token.split(" = ")
if len(key_value) != 2:
raise AuthenticationParserError(i)
else:
key = key_value[0].strip()
value = key_value[1].strip().replace("\\n", "\n")
                        # Allow specifying a command and using its output
                        # as the value
if value.startswith("command(") and value.endswith(")"):
command = value[8:len(value) - 1]
value = Authentication.run_command(command)
                        # Allow specifying a filename and using its contents
                        # as the value
if value.startswith("file(") and value.endswith(")"):
filename = value[5:len(value) - 1]
try:
value_file = open(filename, 'r')
value = value_file.read()
value_file.close()
                            except IOError:
                                # leave the literal value if the file
                                # cannot be read
                                pass
auth[key] = value.strip().replace("\\n", "\n")
res.append(auth)
return res
@staticmethod
def dump(auth):
"""
Serialize an Authentication so that it can be read by 'read_auth_data' later.
"""
if isinstance(auth, Authentication):
auth = auth.auth_list
return [ " ; ".join([ "%s = %s" % (k, v.replace("\n", "\\n")) for k,v in a.items() ]) + "\n" for a in auth ]
@staticmethod
def normalize(auth0):
"""
Remove repeated entries.
"""
        auth = auth0.auth_list if isinstance(auth0, Authentication) else auth0
        # dict entries are unhashable, so track already-seen tokens in a list
        seen, dup_indexes = [], []
        for i, a in enumerate(auth):
            if a in seen:
                dup_indexes.insert(0, i)
            else:
                seen.append(a)
        for i in dup_indexes:
            del auth[i]
        return auth0
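# Illustrative sketch (hypothetical credentials): round-tripping auth data
# through this module.
#
#     lines = ["id = one; type = OpenNebula; username = user; password = pass"]
#     auth = Authentication(Authentication.read_auth_data(lines))
#     one = auth.getAuthInfo("OpenNebula")[0]   # {'id': 'one', ...}
#     dumped = Authentication.dump(auth)        # serializable lines again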
| |
"""
Module for jenkinsapi Node class
"""
import json
import logging
import xml.etree.ElementTree as ET
import time
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.custom_exceptions import PostRequired, TimeOut
from jenkinsapi.custom_exceptions import JenkinsAPIException
from six.moves.urllib.parse import quote as urlquote
log = logging.getLogger(__name__)
class Node(JenkinsBase):
"""
Class to hold information on nodes that are attached as slaves
to the master jenkins instance
"""
def __init__(self, jenkins_obj, baseurl, nodename, node_dict, poll=True):
"""
Init a node object by providing all relevant pointers to it
:param jenkins_obj: ref to the jenkins obj
        :param baseurl: basic url for querying information on a node.
            If url is not set, the object will construct it itself; this is
            useful when the node is being created and does not yet exist in
            Jenkins
        :param nodename: hostname of the node
        :param dict node_dict: Dict with node parameters as described below
        :param bool poll: set to False if the node does not exist or automatic
            refresh from Jenkins is not required. Default is True.
            If the baseurl parameter is set to None, poll will be set to False
JNLP Node:
{
'num_executors': int,
'node_description': str,
'remote_fs': str,
'labels': str,
'exclusive': bool
}
SSH Node:
{
'num_executors': int,
'node_description': str,
'remote_fs': str,
'labels': str,
        'exclusive': bool,
        'host': str,
        'port': int,
        'credential_description': str,
        'jvm_options': str,
        'java_path': str,
        'prefix_start_slave_cmd': str,
        'suffix_start_slave_cmd': str,
        'max_num_retries': int,
        'retry_wait_time': int,
        'retention': str ('Always' or 'OnDemand'),
        'ondemand_delay': int (only for OnDemand retention),
        'ondemand_idle_delay': int (only for OnDemand retention),
'env': [
{
'key':'TEST',
'value':'VALUE'
},
{
'key':'TEST2',
'value':'value2'
}
],
'tool_location': [
{
"key": "hudson.tasks.Maven$MavenInstallation$DescriptorImpl@Maven 3.0.5",
"home": "/home/apache-maven-3.0.5/"
},
{
"key": "hudson.plugins.git.GitTool$DescriptorImpl@Default",
"home": "/home/git-3.0.5/"
},
]
}
        :return: None
"""
self.name = nodename
self.jenkins = jenkins_obj
if not baseurl:
poll = False
baseurl = '%s/computer/%s' % (self.jenkins.baseurl, self.name)
JenkinsBase.__init__(self, baseurl, poll=poll)
self.node_attributes = node_dict
self._element_tree = None
self._config = None
def get_node_attributes(self):
"""
Gets node attributes as dict
Used by Nodes object when node is created
:return: Node attributes dict formatted for Jenkins API request
to create node
"""
na = self.node_attributes
if not na.get('credential_description', False):
# If credentials description is not present - we will create
# JNLP node
launcher = {'stapler-class': 'hudson.slaves.JNLPLauncher'}
else:
try:
credential = self.jenkins.credentials[
na['credential_description']
]
except KeyError:
raise JenkinsAPIException('Credential with description "%s"'
' not found'
% na['credential_description'])
retries = na['max_num_retries'] if 'max_num_retries' in na else ''
re_wait = na['retry_wait_time'] if 'retry_wait_time' in na else ''
launcher = {
'stapler-class': 'hudson.plugins.sshslaves.SSHLauncher',
'$class': 'hudson.plugins.sshslaves.SSHLauncher',
'host': na['host'],
'port': na['port'],
'credentialsId': credential.credential_id,
'jvmOptions': na['jvm_options'],
'javaPath': na['java_path'],
'prefixStartSlaveCmd': na['prefix_start_slave_cmd'],
'suffixStartSlaveCmd': na['suffix_start_slave_cmd'],
'maxNumRetries': retries,
'retryWaitTime': re_wait
}
retention = {
'stapler-class': 'hudson.slaves.RetentionStrategy$Always',
'$class': 'hudson.slaves.RetentionStrategy$Always'
}
if 'retention' in na and na['retention'].lower() == 'ondemand':
retention = {
'stapler-class': 'hudson.slaves.RetentionStrategy$Demand',
'$class': 'hudson.slaves.RetentionStrategy$Demand',
'inDemandDelay': na['ondemand_delay'],
'idleDelay': na['ondemand_idle_delay']
}
node_props = {
'stapler-class-bag': 'true'
}
if 'env' in na:
node_props.update({
'hudson-slaves-EnvironmentVariablesNodeProperty': {
'env': na['env']
}
})
if 'tool_location' in na:
node_props.update({
"hudson-tools-ToolLocationNodeProperty": {
"locations": na['tool_location']
}
})
params = {
'name': self.name,
'type': 'hudson.slaves.DumbSlave$DescriptorImpl',
'json': json.dumps({
'name': self.name,
'nodeDescription': na.get('node_description', ''),
'numExecutors': na['num_executors'],
'remoteFS': na['remote_fs'],
'labelString': na['labels'],
'mode': 'EXCLUSIVE' if na['exclusive'] else 'NORMAL',
'retentionStrategy': retention,
'type': 'hudson.slaves.DumbSlave',
'nodeProperties': node_props,
'launcher': launcher
})
}
return params
def get_jenkins_obj(self):
return self.jenkins
def __str__(self):
return self.name
def is_online(self):
return not self.poll(tree='offline')['offline']
def is_temporarily_offline(self):
return self.poll(tree='temporarilyOffline')['temporarilyOffline']
def is_jnlpagent(self):
return self._data['jnlpAgent']
def is_idle(self):
return self.poll(tree='idle')['idle']
def set_online(self):
"""
        Set the node online.
        Before changing state, verify the client state: if the node is set
        'offline' but 'temporarilyOffline' is not set, the client has
        connection problems and an AssertionError is raised.
        If the node state has not changed after the run, raise AssertionError.
"""
self.poll()
# Before change state check if client is connected
if self._data['offline'] and not self._data['temporarilyOffline']:
raise AssertionError("Node is offline and not marked as "
"temporarilyOffline, check client "
"connection: offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
if self._data['offline'] and self._data['temporarilyOffline']:
self.toggle_temporarily_offline()
if self._data['offline']:
raise AssertionError("The node state is still offline, "
"check client connection:"
" offline = %s, "
"temporarilyOffline = %s" %
(self._data['offline'],
self._data['temporarilyOffline']))
def set_offline(self, message="requested from jenkinsapi"):
"""
        Set the node offline.
        If the node state has not changed after the run, raise AssertionError.
        :param message: optional string explaining why you are taking this
            node offline
"""
if not self._data['offline']:
self.toggle_temporarily_offline(message)
data = self.poll(tree='offline,temporarilyOffline')
if not data['offline']:
raise AssertionError("The node state is still online:" +
"offline = %s , temporarilyOffline = %s" %
(data['offline'],
data['temporarilyOffline']))
def toggle_temporarily_offline(self, message="requested from jenkinsapi"):
"""
        Switches the state of a connected node (online/offline) and sets the
        'temporarilyOffline' property (True/False).
        Calling the same method again will bring the node status back.
        :param message: optional string used to explain why you are
            taking this node offline
"""
initial_state = self.is_temporarily_offline()
url = self.baseurl + \
"/toggleOffline?offlineMessage=" + urlquote(message)
try:
html_result = self.jenkins.requester.get_and_confirm_status(url)
except PostRequired:
html_result = self.jenkins.requester.post_and_confirm_status(
url,
data={})
self.poll()
log.debug(html_result)
state = self.is_temporarily_offline()
if initial_state == state:
raise AssertionError(
"The node state has not changed: temporarilyOffline = %s" %
state)
def update_offline_reason(self, reason):
"""
        Update the offline reason on a temporarily offline node
"""
if self.is_temporarily_offline():
url = self.baseurl + '/changeOfflineCause?offlineMessage=' + urlquote(reason)
self.jenkins.requester.post_and_confirm_status(url, data={})
def offline_reason(self):
return self._data['offlineCauseReason']
@property
def _et(self):
return self._get_config_element_tree()
def _get_config_element_tree(self):
"""
Returns an xml element tree for the node's config.xml. The
resulting tree is cached for quick lookup.
"""
if self._config is None:
self.load_config()
if self._element_tree is None:
self._element_tree = ET.fromstring(self._config)
return self._element_tree
def get_config(self):
"""
Returns the config.xml from the node.
"""
response = self.jenkins.requester.get_and_confirm_status(
"%(baseurl)s/config.xml" % self.__dict__)
return response.text
def load_config(self):
"""
Loads the config.xml for the node allowing it to be re-queried
without generating new requests.
"""
if self.name == 'master':
raise JenkinsAPIException('master node does not have config.xml')
self._config = self.get_config()
self._get_config_element_tree()
def upload_config(self, config_xml):
"""
Uploads config_xml to the config.xml for the node.
"""
if self.name == 'master':
raise JenkinsAPIException('master node does not have config.xml')
self.jenkins.requester.post_and_confirm_status(
"%(baseurl)s/config.xml" % self.__dict__,
data=config_xml)
def get_labels(self):
"""
Returns the labels for a slave as a string with each label
separated by the ' ' character.
"""
return self.get_config_element('label')
def get_num_executors(self):
try:
return self.get_config_element('numExecutors')
except JenkinsAPIException:
return self._data['numExecutors']
def set_num_executors(self, value):
"""
Sets number of executors for node
Warning! Setting number of executors on master node will erase all
other settings
"""
set_value = value if isinstance(value, str) else str(value)
if self.name == 'master':
# master node doesn't have config.xml, so we're going to submit
# form here
data = 'json=%s' % urlquote(
json.dumps({
"numExecutors": set_value,
"nodeProperties": {
"stapler-class-bag": "true"
}
})
)
url = self.baseurl + '/configSubmit'
self.jenkins.requester.post_and_confirm_status(url, data=data)
else:
self.set_config_element('numExecutors', set_value)
self.poll()
def get_config_element(self, el_name):
"""
Returns simple config element.
        Should not be used to return "nodeProperties" or "launcher"
"""
return self._et.find(el_name).text
def set_config_element(self, el_name, value):
"""
Sets simple config element
"""
self._et.find(el_name).text = value
xml_str = ET.tostring(self._et)
self.upload_config(xml_str)
def get_monitor(self, monitor_name, poll_monitor=True):
"""
Polls the node returning one of the monitors in the monitorData branch of the
returned node api tree.
"""
monitor_data_key = 'monitorData'
if poll_monitor:
# polling as monitors like response time can be updated
monitor_data = self.poll(tree=monitor_data_key)[monitor_data_key]
else:
monitor_data = self._data[monitor_data_key]
full_monitor_name = 'hudson.node_monitors.{0}'.format(monitor_name)
if full_monitor_name not in monitor_data:
raise AssertionError('Node monitor %s not found' % monitor_name)
return monitor_data[full_monitor_name]
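    # Illustrative note (hypothetical node object): the helpers below all
    # delegate to get_monitor, e.g.
    #     node.get_monitor('SwapSpaceMonitor')
    # polls the node api and returns the
    # 'hudson.node_monitors.SwapSpaceMonitor' branch of monitorData.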
def get_available_physical_memory(self):
"""
Returns the node's available physical memory in bytes.
"""
monitor_data = self.get_monitor('SwapSpaceMonitor')
return monitor_data['availablePhysicalMemory']
def get_available_swap_space(self):
"""
Returns the node's available swap space in bytes.
"""
monitor_data = self.get_monitor('SwapSpaceMonitor')
return monitor_data['availableSwapSpace']
def get_total_physical_memory(self):
"""
Returns the node's total physical memory in bytes.
"""
monitor_data = self.get_monitor('SwapSpaceMonitor')
return monitor_data['totalPhysicalMemory']
def get_total_swap_space(self):
"""
Returns the node's total swap space in bytes.
"""
monitor_data = self.get_monitor('SwapSpaceMonitor')
return monitor_data['totalSwapSpace']
def get_workspace_path(self):
"""
Returns the local path to the node's Jenkins workspace directory.
"""
monitor_data = self.get_monitor('DiskSpaceMonitor')
return monitor_data['path']
def get_workspace_size(self):
"""
Returns the size in bytes of the node's Jenkins workspace directory.
"""
monitor_data = self.get_monitor('DiskSpaceMonitor')
return monitor_data['size']
def get_temp_path(self):
"""
Returns the local path to the node's temp directory.
"""
monitor_data = self.get_monitor('TemporarySpaceMonitor')
return monitor_data['path']
def get_temp_size(self):
"""
Returns the size in bytes of the node's temp directory.
"""
monitor_data = self.get_monitor('TemporarySpaceMonitor')
return monitor_data['size']
def get_architecture(self):
"""
        Returns the system architecture of the node, e.g. "Linux (amd64)".
"""
# no need to poll as the architecture will never change
return self.get_monitor('ArchitectureMonitor', poll_monitor=False)
def block_until_idle(self, timeout, poll_time=5):
"""
        Blocks until the node becomes idle.
        :param timeout: Time in seconds after which the wait is aborted.
        :param poll_time: Interval in seconds between each check.
        :raises TimeOut: if the node is still busy after ``timeout`` seconds
"""
start_time = time.time()
while not self.is_idle() and (time.time() - start_time) < timeout:
log.debug(
"Waiting for the node to become idle. Elapsed time: %s",
(time.time() - start_time)
)
time.sleep(poll_time)
if not self.is_idle():
raise TimeOut(
"The node has not become idle after {} minutes."
.format(timeout/60)
)
def get_response_time(self):
"""
Returns the node's average response time.
"""
monitor_data = self.get_monitor('ResponseTimeMonitor')
return monitor_data['average']
def get_clock_difference(self):
"""
Returns the difference between the node's clock and the master Jenkins clock.
Used to detect out of sync clocks.
"""
monitor_data = self.get_monitor('ClockMonitor')
return monitor_data['diff']
| |
#!/usr/bin/env python
# -*- python -*-
################################################################################
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
import os
import re
import codegen
import genutil
import ildutil
import math
class operand_field_t(object):
def __init__(self,
name,
aggtype,
ctype,
bitwidth,
default_visibility=None,
default_initializer=None,
xprint='NOPRINT',
internal_or_public="INTERNAL",
dio="DO",
eio="EO",):
self.name = name
self.aggtype = aggtype
self.ctype = ctype
self.bitwidth = int(bitwidth)
self.default_visibility = default_visibility
self.xprint = xprint
self.internal_or_public = internal_or_public
self.dio = dio
self.eio = eio
        if self.eio not in ['EI', 'EO']:
            err = "Bad Encoder IO value: %s -- need one of {EI,EO}"
            genutil.die(err % self.eio)
        if self.dio not in ['DI', 'DO', 'DS']:
            err = "Bad decoder IO value: %s -- need one of {DI,DO,DS}"
            genutil.die(err % self.dio)
        self.encoder_input = (self.eio == 'EI')
        self.decoder_skip = (self.dio == 'DS')
#NOTE: this next field is only used if initialize_each_field is True.
self.default_initializer = default_initializer
self.is_enum = 'enum' in self.ctype
# this is the C type that will be used in the operand storage struct.
self.storage_type = None
#if True using bit fields
self.compressed = False
    def print_field(self):
        return self.xprint == 'PRINT'
def key_operand_name(a):
return a.name
def key_bitwidth(a):
return a.bitwidth
def sort_cmp_operands(a):
b = sorted(a, key=key_operand_name)
c = sorted(b, key=key_bitwidth)
return c
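# Illustrative note: Python's sort is stable, so sorting by name first and
# then by bitwidth leaves operands ordered by bitwidth with ties broken
# alphabetically. For hypothetical fields A (8 bits), B (8 bits) and
# C (4 bits), sort_cmp_operands([B, A, C]) returns [C, A, B].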
class operands_storage_t(object):
"""This is where we build up the storage for the fields that hold
the operand values.
"""
def __init__(self,lines,compress_operands=False):
#a dict of operand name to operand_field_t
self.operand_fields = self._read_storage_fields(lines)
self.compressed = compress_operands
# the prefix of the accessor function
self.xed_accessor_fn_pfx = ildutil.xed_strings['op_accessor']
        #list of bins; each bin aggregates operands
        #used for squeezing operands with few bits into one 32-bit variable
self.bins = []
def _read_storage_fields(self,lines):
''' Return a dictionary of operand_field_t objects
indexed by field name '''
comment_pattern = re.compile(r'[#].*$')
operand_types = {}
for line in lines:
pline = comment_pattern.sub('',line).strip()
if pline == '':
continue
wrds = pline.split()
if len(wrds) != 9:
genutil.die("Bad number of tokens on line: " + line)
# aggtype is "SCALAR"
(name, aggtype, ctype, width, default_visibility,
xprint, internal_or_public, dio, eio) = wrds
if name in operand_types:
genutil.die("Duplicate name %s in input-fields file." % (name))
if aggtype != 'SCALAR':
err = ("type different than SCALAR is not" +
" supported in: %s" % (line))
genutil.die(err)
if ctype == 'xed_reg_enum_t':
default_initializer = 'XED_REG_INVALID'
elif ctype == 'xed_iclass_enum_t':
default_initializer = 'XED_ICLASS_INVALID'
else:
default_initializer = '0'
operand_types[name] = operand_field_t(name, aggtype,
ctype, width,
default_visibility,
default_initializer,
xprint,
internal_or_public,
dio,
eio,)
return operand_types
def get_operand(self,opname):
return self.operand_fields[opname]
def get_operands(self):
return self.operand_fields
def decoder_skip(self,operand):
return self.operand_fields[operand].decoder_skip
def get_ctype(self,operand):
return self.operand_fields[operand].ctype
def get_storage_type(self,operand):
return self.operand_fields[operand].storage_type
def _gen_op_getter_fo(self,opname):
''' generate the function object for the getter accessors
adding cast to the C type according to the data files(ctype)'''
inst = 'd'
fname = get_op_getter_fn(opname)
ret_type = self.get_ctype(opname)
fo = codegen.function_object_t(fname,
return_type=ret_type,
static=True,
inline=True)
fo.add_arg('const xed_decoded_inst_t* %s' % inst)
op = opname.lower()
fo.add_code_eol('return (%s)%s->_operands.%s' % (ret_type,inst, op))
return fo
def _gen_op_setter_fo(self,opname):
''' generate the function object for the setter accessors
adding cast to the C type according to the data files(ctype)'''
inst = 'd'
opval = 'opval'
fname = get_op_setter_fn(opname)
fo = codegen.function_object_t(fname,
return_type='void',
static=True,
inline=True)
fo.add_arg('xed_decoded_inst_t* %s' % inst)
fo.add_arg('%s %s' % (self.get_ctype(opname),opval))
op = opname.lower()
type = self.get_storage_type(opname)
fo.add_code_eol('%s->_operands.%s = (%s)%s' % (inst, op, type ,opval))
return fo
def _gen_generic_getter(self):
''' for xed's internal usage (printing) we need to be able to
get an operand based on its index.
generating here a switch/case over the operand index to call the
correct getter function '''
inst = 'd'
fname = 'xed3_get_generic_operand'
ret_arg = 'ret_arg'
fo = codegen.function_object_t(fname,
return_type='void',
static=False,
inline=False,
dll_export=True)
fo.add_arg('const xed_decoded_inst_t* %s' % inst)
fo.add_arg('xed_operand_enum_t operand')
fo.add_arg('void* %s' % ret_arg)
switch_gen = codegen.c_switch_generator_t('operand',fo)
op_names = sorted(self.operand_fields.keys())
for op in op_names:
switch_key = "XED_OPERAND_%s" % op
ctype = self.get_ctype(op)
func_getter = "%s(d)" % get_op_getter_fn(op)
code = "*((%s*)%s)=%s;" % (ctype,ret_arg,func_getter)
switch_gen.add_case(switch_key,[code])
switch_gen.add_default(['xed_assert(0);'])
switch_gen.finish()
return fo
def _gen_generic_setter(self):
''' generating a switch/case over the operand index to call the
correct setter function '''
inst = 'd'
fname = 'xed3_set_generic_operand'
in_value = 'val'
fo = codegen.function_object_t(fname,
return_type='void',
static=False,
inline=False,
dll_export=True)
fo.add_arg('xed_decoded_inst_t* %s' % inst)
fo.add_arg('xed_operand_enum_t operand')
fo.add_arg('xed_uint32_t %s' % in_value)
switch_gen = codegen.c_switch_generator_t('operand',fo)
op_names = sorted(self.operand_fields.keys())
for op in op_names:
switch_key = "XED_OPERAND_%s" % op
ctype = self.get_ctype(op)
func_setter = get_op_setter_fn(op)
code = "%s(%s,(%s)%s);" % (func_setter,inst,ctype,in_value)
switch_gen.add_case(switch_key,[code])
switch_gen.add_default(['xed_assert(0);'])
switch_gen.finish()
return fo
def dump_operand_accessors(self,agi):
''' Dump operand accessor to inspect the data
structure xed_operand_storage_t '''
fo_list = []
h_fname = get_operand_accessors_fn()
c_fname = h_fname.replace('.h', '.c')
for opname in self.operand_fields.keys():
getter_fo = self._gen_op_getter_fo(opname)
setter_fo = self._gen_op_setter_fo(opname)
fo_list.append(getter_fo)
fo_list.append(setter_fo)
# generate a generic getter
generic_getter = self._gen_generic_getter()
generic_setter = self._gen_generic_setter()
xeddir = os.path.abspath(agi.common.options.xeddir)
gendir = agi.common.options.gendir
h_file = codegen.xed_file_emitter_t(xeddir,gendir,
h_fname, shell_file=False,
is_private=False)
h_file.add_header(['xed-decoded-inst.h','xed-operand-storage.h'])
h_file.start()
for fo in fo_list:
decl = fo.emit_header()
h_file.add_code(decl)
h_file.add_code(generic_getter.emit_header())
h_file.add_code(generic_setter.emit_header())
for fo in fo_list:
fo.emit_file_emitter(h_file)
h_file.close()
c_file = codegen.file_emitter_t(gendir,
c_fname, shell_file=False)
c_file.add_header(h_fname)
c_file.start()
generic_getter.emit_file_emitter(c_file)
generic_setter.emit_file_emitter(c_file)
c_file.close()
def _fix_bit_width_for_enums(self,agi):
        ''' the default width of the enums is too big and wasteful.
        we get the list of all values for each enum in agi
        and set the bitwidth to the minimal width needed.
'''
        # max_bits_for_enum is a mapping from enum name to the minimal number
        # of bits required to represent it
max_bits_for_enum = self._gen_max_bits_per_enum(agi.all_enums)
for op in list(self.operand_fields.values()):
if op.ctype in max_bits_for_enum:
needed_bits = max_bits_for_enum[op.ctype]
                if op.bitwidth < needed_bits:
                    # verify that the bitwidth specified in the data files
                    # is not smaller than the calculated minimum
                    vals = agi.all_enums[op.ctype]
                    err = 'bit width for %s is too small, enum has %d values'
                    genutil.die(err % (op.name, len(vals)))
else:
op.bitwidth = max_bits_for_enum[op.ctype]
def _compute_type_in_storage(self):
        ''' detect the minimal C data type that can be used to represent
        the operand.
        the accessors will cast the operand to its C type according to the
        data files'''
for op in list(self.operand_fields.values()):
width = op.bitwidth
if width <= 8:
op.storage_type = 'xed_uint8_t'
elif width <=16:
op.storage_type ='xed_uint16_t'
elif width <=32:
op.storage_type = 'xed_uint32_t'
elif width <=64:
op.storage_type = 'xed_uint64_t'
else:
genutil.die("unhandled width")
def emit(self,agi):
        ''' emit the data type xed_operand_storage_t'''
filename = 'xed-operand-storage.h'
xeddir = agi.common.options.xeddir
gendir = agi.common.options.gendir
fe = codegen.xed_file_emitter_t(xeddir, gendir, filename)
fe.headers.remove('xed-internal-header.h')
headers = ['xed-chip-enum.h', 'xed-error-enum.h', 'xed-iclass-enum.h',
'xed-reg-enum.h','xed-operand-element-type-enum.h']
fe.add_header(headers)
fe.start()
cgen = codegen.c_class_generator_t('xed_operand_storage_t',
var_prefix='')
#compute the minimal ctype required to represent each enum
self._fix_bit_width_for_enums(agi)
        #compute the ctype of the operand as represented in the operand storage
        self._compute_type_in_storage()
        if self.compressed:
            self.bins = self._compress_operands()
            operands = list(self.operand_fields.values())
            un_compressed = [op for op in operands if not op.compressed]
            un_compressed = sort_cmp_operands(un_compressed)
            # first emit all the operands that do not use bit fields
for op in un_compressed:
cgen.add_var(op.name.lower(), op.storage_type,
accessors='none')
            #emit the operands with bit fields
            for xbin in self.bins:
for op in xbin.operands:
cgen.add_var(op.name.lower(), xbin.storage_ctype,
bit_width=op.bitwidth, accessors='none')
else:
operands_sorted = list(self.operand_fields.values())
operands_sorted = sort_cmp_operands(operands_sorted)
for op in operands_sorted:
cgen.add_var(op.name.lower(), op.storage_type,
accessors='none')
lines = cgen.emit_decl()
fe.writelines(lines)
fe.close()
def _get_num_elements_in_enum(self,values_list):
        ''' return the number of elements in the enum.
        if the enum values do not include the x_LAST sentinel, account for it'''
has_last = False
for val in values_list:
if 'LAST' in val:
has_last = True
if has_last:
return len(values_list)
return len(values_list) + 1
def _gen_max_bits_per_enum(self,all_enums):
        ''' calculate the number of bits required to capture each enum,
        returning a dict of enum name to the number of required bits '''
widths = {}
for (enum_name, values_list) in list(all_enums.items()):
num_values = self._get_num_elements_in_enum(values_list)
log2 = math.log(num_values,2)
needed_bits = int(math.ceil(log2))
widths[enum_name]= needed_bits
# special handling for xed_error_enum_t.
# this width is hard coded since we can not capture the values
# of this enum in the generator
widths['xed_error_enum_t'] = 4
return widths
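    # Worked example for the width computation above (hypothetical enum):
    # an enum with 5 values plus an implicit x_LAST sentinel has 6 elements
    # and needs ceil(log2(6)) == 3 bits, so it gets a 3-bit wide field.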
def _get_candidates_for_compression(self):
        ''' collect all the operands that we need to compress.
        the operands that we need to compress have a bitwidth smaller than
        their ctype can hold. '''
        candidates = []
        for op in list(self.operand_fields.values()):
            # for performance, hot operands should not use bit fields
            # FIXME: add field to the operands for excluding hot fields
            # from being compressed
            #if op.name.lower() in ['error','outreg','mode']:
            #    continue
            if op.bitwidth != 32 and op.bitwidth != 64:
                candidates.append(op)
        return candidates
def _place_operand_in_bin(self,op,bins):
''' find a bin that has place for the operand '''
for xbin in bins:
if xbin.operand_fits(op):
xbin.add_operand(op)
return
#did not find any matching bin, need to create new one
xbin = operands_bin_t()
xbin.add_operand(op)
bins.append(xbin)
return
def _partition_to_bins(self,ops_sorted):
''' partition all the operands in bins '''
bins = []
for op in ops_sorted:
self._place_operand_in_bin(op,bins)
op.compressed = True
return bins
def _compress_operands(self):
        ''' most operands' widths are less than their C type can hold.
        in order to save space we bin-pack the operands.
        each bin is 32 bits wide,
        using a First Fit Decreasing (FFD) strategy '''
operands = self._get_candidates_for_compression()
operands = sort_cmp_operands(operands)
bins = self._partition_to_bins(operands)
return bins
class operands_bin_t(object):
    ''' This class represents a single bin that aggregates a list of operands
    into a single C struct '''
def __init__(self):
self.operands = [] #list of operands
self.size = 0 #total width in bits
self.max_size = 32 #the max width
self.storage_ctype = 'xed_uint32_t' #the C type used for the operands
def add_operand(self,op):
''' adding single operand to this bin'''
self.operands.append(op)
self.size += op.bitwidth
def operand_fits(self,operand):
''' checks if the given operand can be inserted into this bin '''
return operand.bitwidth + self.size <= self.max_size
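# Illustrative walk-through of the packing above (hypothetical widths):
# given operands of 9, 10, 12 and 20 bits in the order produced by
# sort_cmp_operands, first-fit places 9, 10 and 12 in one 32-bit bin
# (9 + 10 + 12 = 31) and starts a second bin for 20, since 31 + 20
# exceeds the 32-bit limit.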
################
# These are global functions, accessed from places that do not have the
# operands_storage_t type
################
def get_operand_accessors_fn():
return 'xed-operand-accessors.h'
def get_op_getter_full_func(opname,strings_dict):
obj_name = strings_dict['obj_str']
accessor = get_op_getter_fn(opname)
return '%s(%s)' % (accessor,obj_name)
def get_op_getter_fn(opname):
xed_accessor_fn_pfx = ildutil.xed_strings['op_accessor']
return '%s_get_%s' % (xed_accessor_fn_pfx, opname.lower())
def get_op_setter_fn(opname):
xed_accessor_fn_pfx = ildutil.xed_strings['op_accessor']
return '%s_set_%s' % (xed_accessor_fn_pfx, opname.lower())
| |
# Copyright 2011 Isaku Yamahata <yamahata@valinux co jp>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import strutils
import six
from nova import exception
from nova.i18n import _
from nova import utils
from nova.virt import driver
CONF = cfg.CONF
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
LOG = logging.getLogger(__name__)
DEFAULT_ROOT_DEV_NAME = '/dev/sda1'
_DEFAULT_MAPPINGS = {'ami': 'sda1',
'ephemeral0': 'sda2',
'root': DEFAULT_ROOT_DEV_NAME,
'swap': 'sda3'}
bdm_legacy_fields = set(['device_name', 'delete_on_termination',
'virtual_name', 'snapshot_id',
'volume_id', 'volume_size', 'no_device',
'connection_info'])
bdm_new_fields = set(['source_type', 'destination_type',
'guest_format', 'device_type', 'disk_bus', 'boot_index',
'device_name', 'delete_on_termination', 'snapshot_id',
'volume_id', 'volume_size', 'image_id', 'no_device',
'connection_info'])
bdm_db_only_fields = set(['id', 'instance_uuid'])
bdm_db_inherited_fields = set(['created_at', 'updated_at',
'deleted_at', 'deleted'])
bdm_new_non_api_fields = set(['volume_id', 'snapshot_id',
'image_id', 'connection_info'])
bdm_new_api_only_fields = set(['uuid'])
bdm_new_api_fields = ((bdm_new_fields - bdm_new_non_api_fields) |
bdm_new_api_only_fields)
class BlockDeviceDict(dict):
"""Represents a Block Device Mapping in Nova."""
_fields = bdm_new_fields
_db_only_fields = (bdm_db_only_fields |
bdm_db_inherited_fields)
_required_fields = set(['source_type'])
def __init__(self, bdm_dict=None, do_not_default=None, **kwargs):
super(BlockDeviceDict, self).__init__()
bdm_dict = bdm_dict or {}
bdm_dict.update(kwargs)
do_not_default = do_not_default or set()
self._validate(bdm_dict)
if bdm_dict.get('device_name'):
bdm_dict['device_name'] = prepend_dev(bdm_dict['device_name'])
bdm_dict['delete_on_termination'] = bool(
bdm_dict.get('delete_on_termination'))
# NOTE (ndipanov): Never default db fields
self.update({field: None for field in self._fields - do_not_default})
self.update(list(six.iteritems(bdm_dict)))
def _validate(self, bdm_dict):
"""Basic data format validations."""
dict_fields = set(key for key, _ in six.iteritems(bdm_dict))
# Check that there are no bogus fields
if not (dict_fields <=
(self._fields | self._db_only_fields)):
raise exception.InvalidBDMFormat(
details=_("Some fields are invalid."))
if bdm_dict.get('no_device'):
return
# Check that all required fields are there
if (self._required_fields and
not ((dict_fields & self._required_fields) ==
self._required_fields)):
raise exception.InvalidBDMFormat(
details=_("Some required fields are missing"))
if 'delete_on_termination' in bdm_dict:
bdm_dict['delete_on_termination'] = strutils.bool_from_string(
bdm_dict['delete_on_termination'])
if bdm_dict.get('device_name') is not None:
validate_device_name(bdm_dict['device_name'])
validate_and_default_volume_size(bdm_dict)
if bdm_dict.get('boot_index'):
try:
bdm_dict['boot_index'] = int(bdm_dict['boot_index'])
except ValueError:
raise exception.InvalidBDMFormat(
details=_("Boot index is invalid."))
@classmethod
def from_legacy(cls, legacy_bdm):
copy_over_fields = bdm_legacy_fields & bdm_new_fields
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
# NOTE (ndipanov): These fields cannot be computed
# from legacy bdm, so do not default them
# to avoid overwriting meaningful values in the db
non_computable_fields = set(['boot_index', 'disk_bus',
'guest_format', 'device_type'])
new_bdm = {fld: val for fld, val in six.iteritems(legacy_bdm)
if fld in copy_over_fields}
virt_name = legacy_bdm.get('virtual_name')
if is_swap_or_ephemeral(virt_name):
new_bdm['source_type'] = 'blank'
new_bdm['delete_on_termination'] = True
new_bdm['destination_type'] = 'local'
if virt_name == 'swap':
new_bdm['guest_format'] = 'swap'
else:
new_bdm['guest_format'] = CONF.default_ephemeral_format
elif legacy_bdm.get('snapshot_id'):
new_bdm['source_type'] = 'snapshot'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('volume_id'):
new_bdm['source_type'] = 'volume'
new_bdm['destination_type'] = 'volume'
elif legacy_bdm.get('no_device'):
# NOTE (ndipanov): Just keep the BDM for now,
pass
else:
raise exception.InvalidBDMFormat(
details=_("Unrecognized legacy format."))
return cls(new_bdm, non_computable_fields)
@classmethod
def from_api(cls, api_dict, image_uuid_specified):
"""Transform the API format of data to the internally used one.
Only validate if the source_type field makes sense.
"""
if not api_dict.get('no_device'):
source_type = api_dict.get('source_type')
device_uuid = api_dict.get('uuid')
destination_type = api_dict.get('destination_type')
if source_type not in ('volume', 'image', 'snapshot', 'blank'):
raise exception.InvalidBDMFormat(
details=_("Invalid source_type field."))
elif source_type == 'blank' and device_uuid:
raise exception.InvalidBDMFormat(
details=_("Invalid device UUID."))
elif source_type != 'blank':
if not device_uuid:
raise exception.InvalidBDMFormat(
details=_("Missing device UUID."))
api_dict[source_type + '_id'] = device_uuid
if source_type == 'image' and destination_type == 'local':
try:
boot_index = int(api_dict.get('boot_index', -1))
except ValueError:
raise exception.InvalidBDMFormat(
details=_("Boot index is invalid."))
                # if this bdm is generated from --image, then
                # source_type = image and destination_type = local is allowed
if not (image_uuid_specified and boot_index == 0):
raise exception.InvalidBDMFormat(
details=_("Mapping image to local is not supported."))
api_dict.pop('uuid', None)
return cls(api_dict)
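    # Illustrative sketch (hypothetical volume id VOL):
    #     BlockDeviceDict.from_api({'source_type': 'volume',
    #                               'destination_type': 'volume',
    #                               'uuid': VOL}, image_uuid_specified=False)
    # stores VOL under 'volume_id' and pops the transport-only 'uuid' key
    # before constructing the dict.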
def legacy(self):
copy_over_fields = bdm_legacy_fields - set(['virtual_name'])
copy_over_fields |= (bdm_db_only_fields |
bdm_db_inherited_fields)
legacy_block_device = {field: self.get(field)
for field in copy_over_fields if field in self}
source_type = self.get('source_type')
destination_type = self.get('destination_type')
no_device = self.get('no_device')
if source_type == 'blank':
if self['guest_format'] == 'swap':
legacy_block_device['virtual_name'] = 'swap'
else:
# NOTE (ndipanov): Always label as 0, it is up to
# the calling routine to re-enumerate them
legacy_block_device['virtual_name'] = 'ephemeral0'
elif source_type in ('volume', 'snapshot') or no_device:
legacy_block_device['virtual_name'] = None
elif source_type == 'image':
if destination_type != 'volume':
# NOTE(ndipanov): Image bdms with local destination
# have no meaning in the legacy format - raise
raise exception.InvalidBDMForLegacy()
legacy_block_device['virtual_name'] = None
return legacy_block_device
def get_image_mapping(self):
drop_fields = (set(['connection_info']) |
self._db_only_fields)
mapping_dict = dict(self)
for fld in drop_fields:
mapping_dict.pop(fld, None)
return mapping_dict
def is_safe_for_update(block_device_dict):
"""Determine if passed dict is a safe subset for update.
Safe subset in this case means a safe subset of both legacy
and new versions of data, that can be passed to an UPDATE query
without any transformation.
"""
fields = set(block_device_dict.keys())
return fields <= (bdm_new_fields |
bdm_db_inherited_fields |
bdm_db_only_fields)
def create_image_bdm(image_ref, boot_index=0):
"""Create a block device dict based on the image_ref.
This is useful in the API layer to keep the compatibility
with having an image_ref as a field in the instance requests
"""
return BlockDeviceDict(
{'source_type': 'image',
'image_id': image_ref,
'delete_on_termination': True,
'boot_index': boot_index,
'device_type': 'disk',
'destination_type': 'local'})
def create_blank_bdm(size, guest_format=None):
return BlockDeviceDict(
{'source_type': 'blank',
'delete_on_termination': True,
'device_type': 'disk',
'boot_index': -1,
'destination_type': 'local',
'guest_format': guest_format,
'volume_size': size})
def snapshot_from_bdm(snapshot_id, template):
"""Create a basic volume snapshot BDM from a given template bdm."""
copy_from_template = ('disk_bus', 'device_type', 'boot_index',
'delete_on_termination', 'volume_size',
'device_name')
snapshot_dict = {'source_type': 'snapshot',
'destination_type': 'volume',
'snapshot_id': snapshot_id}
for key in copy_from_template:
snapshot_dict[key] = template.get(key)
return BlockDeviceDict(snapshot_dict)
def legacy_mapping(block_device_mapping):
"""Transform a list of block devices of an instance back to the
legacy data format.
"""
legacy_block_device_mapping = []
for bdm in block_device_mapping:
try:
legacy_block_device = BlockDeviceDict(bdm).legacy()
except exception.InvalidBDMForLegacy:
continue
legacy_block_device_mapping.append(legacy_block_device)
# Re-enumerate the ephemeral devices
for i, dev in enumerate(dev for dev in legacy_block_device_mapping
if dev['virtual_name'] and
is_ephemeral(dev['virtual_name'])):
dev['virtual_name'] = dev['virtual_name'][:-1] + str(i)
return legacy_block_device_mapping
def from_legacy_mapping(legacy_block_device_mapping, image_uuid='',
root_device_name=None, no_root=False):
"""Transform a legacy list of block devices to the new data format."""
new_bdms = [BlockDeviceDict.from_legacy(legacy_bdm)
for legacy_bdm in legacy_block_device_mapping]
# NOTE (ndipanov): We will not decide which device is root here - we assume
# that it will be supplied later. This is useful for having the root device
# as part of the image defined mappings that are already in the v2 format.
if no_root:
for bdm in new_bdms:
bdm['boot_index'] = -1
return new_bdms
image_bdm = None
volume_backed = False
# Try to assign boot_device
if not root_device_name and not image_uuid:
# NOTE (ndipanov): If there is no root_device, pick the first non
# blank one.
non_blank = [bdm for bdm in new_bdms if bdm['source_type'] != 'blank']
if non_blank:
non_blank[0]['boot_index'] = 0
else:
for bdm in new_bdms:
if (bdm['source_type'] in ('volume', 'snapshot', 'image') and
root_device_name is not None and
(strip_dev(bdm.get('device_name')) ==
strip_dev(root_device_name))):
bdm['boot_index'] = 0
volume_backed = True
elif not bdm['no_device']:
bdm['boot_index'] = -1
else:
bdm['boot_index'] = None
if not volume_backed and image_uuid:
image_bdm = create_image_bdm(image_uuid, boot_index=0)
return ([image_bdm] if image_bdm else []) + new_bdms
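# Illustrative sketch (hypothetical ids): a legacy mapping with a single
# volume entry
#     [{'volume_id': VOL, 'device_name': '/dev/vda'}]
# converts to one new-style bdm with source_type='volume' and
# destination_type='volume'; when root_device_name='/dev/vda' is also
# passed, that bdm gets boot_index=0, marking a volume-backed root device.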
def properties_root_device_name(properties):
"""get root device name from image meta data.
If it isn't specified, return None.
"""
root_device_name = None
# NOTE(yamahata): see image_service.s3.s3create()
for bdm in properties.get('mappings', []):
if bdm['virtual'] == 'root':
root_device_name = bdm['device']
# NOTE(yamahata): register_image's command line can override
# <machine>.manifest.xml
if 'root_device_name' in properties:
root_device_name = properties['root_device_name']
return root_device_name
def validate_device_name(value):
try:
# NOTE (ndipanov): Do not allow empty device names
# until assigning default values
# is supported by nova.compute
utils.check_string_length(value, 'Device name',
min_length=1, max_length=255)
except exception.InvalidInput:
raise exception.InvalidBDMFormat(
details=_("Device name empty or too long."))
if ' ' in value:
raise exception.InvalidBDMFormat(
details=_("Device name contains spaces."))
def validate_and_default_volume_size(bdm):
if bdm.get('volume_size'):
try:
bdm['volume_size'] = utils.validate_integer(
bdm['volume_size'], 'volume_size', min_value=0)
except exception.InvalidInput:
# NOTE: We can remove this validation code after removing
# Nova v2.0 API code because v2.1 API validates this case
# already at its REST API layer.
raise exception.InvalidBDMFormat(
details=_("Invalid volume_size."))
_ephemeral = re.compile(r'^ephemeral(\d|[1-9]\d+)$')
def is_ephemeral(device_name):
return _ephemeral.match(device_name) is not None
def ephemeral_num(ephemeral_name):
assert is_ephemeral(ephemeral_name)
return int(_ephemeral.sub('\\1', ephemeral_name))
def is_swap_or_ephemeral(device_name):
return (device_name and
(device_name == 'swap' or is_ephemeral(device_name)))
def new_format_is_swap(bdm):
if (bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local' and
bdm.get('guest_format') == 'swap'):
return True
return False
def new_format_is_ephemeral(bdm):
if (bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local' and
bdm.get('guest_format') != 'swap'):
return True
return False
def get_root_bdm(bdms):
try:
return next(bdm for bdm in bdms if bdm.get('boot_index', -1) == 0)
except StopIteration:
return None
def get_bdms_to_connect(bdms, exclude_root_mapping=False):
"""Will return non-root mappings, when exclude_root_mapping is true.
Otherwise all mappings will be returned.
"""
return (bdm for bdm in bdms if bdm.get('boot_index', -1) != 0 or
not exclude_root_mapping)
def mappings_prepend_dev(mappings):
"""Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
for m in mappings:
virtual = m['virtual']
if (is_swap_or_ephemeral(virtual) and
(not m['device'].startswith('/'))):
m['device'] = '/dev/' + m['device']
return mappings
_dev = re.compile('^/dev/')
def strip_dev(device_name):
"""remove leading '/dev/'."""
return _dev.sub('', device_name) if device_name else device_name
def prepend_dev(device_name):
"""Make sure there is a leading '/dev/'."""
return device_name and '/dev/' + strip_dev(device_name)
_pref = re.compile('^((x?v|s|h)d)')
def strip_prefix(device_name):
"""remove both leading /dev/ and xvd or sd or vd or hd."""
device_name = strip_dev(device_name)
return _pref.sub('', device_name)
_nums = re.compile(r'\d+')
def get_device_letter(device_name):
letter = strip_prefix(device_name)
# NOTE(vish): delete numbers in case we have something like
# /dev/sda1
return _nums.sub('', letter)
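# Illustrative examples for the helpers above (hypothetical device names):
#     strip_dev('/dev/sda1')         -> 'sda1'
#     prepend_dev('sda1')            -> '/dev/sda1'
#     strip_prefix('/dev/sda1')      -> 'a1'
#     get_device_letter('/dev/sda1') -> 'a'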
def instance_block_mapping(instance, bdms):
root_device_name = instance['root_device_name']
# NOTE(clayg): remove this when xenapi is setting default_root_device
if root_device_name is None:
if driver.compute_driver_matches('xenapi.XenAPIDriver'):
root_device_name = '/dev/xvda'
else:
return _DEFAULT_MAPPINGS
mappings = {}
mappings['ami'] = strip_dev(root_device_name)
mappings['root'] = root_device_name
default_ephemeral_device = instance.get('default_ephemeral_device')
if default_ephemeral_device:
mappings['ephemeral0'] = default_ephemeral_device
default_swap_device = instance.get('default_swap_device')
if default_swap_device:
mappings['swap'] = default_swap_device
ebs_devices = []
blanks = []
# 'ephemeralN', 'swap' and ebs
for bdm in bdms:
# ebs volume case
if bdm.destination_type == 'volume':
ebs_devices.append(bdm.device_name)
continue
if bdm.source_type == 'blank':
blanks.append(bdm)
# NOTE(yamahata): I'm not sure how ebs device should be numbered.
# Right now sort by device name for deterministic
# result.
if ebs_devices:
ebs_devices.sort()
for nebs, ebs in enumerate(ebs_devices):
mappings['ebs%d' % nebs] = ebs
swap = [bdm for bdm in blanks if bdm.guest_format == 'swap']
if swap:
mappings['swap'] = swap.pop().device_name
ephemerals = [bdm for bdm in blanks if bdm.guest_format != 'swap']
if ephemerals:
for num, eph in enumerate(ephemerals):
mappings['ephemeral%d' % num] = eph.device_name
return mappings
def match_device(device):
"""Matches device name and returns prefix, suffix."""
match = re.match("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$", device)
if not match:
return None
return match.groups()
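# Illustrative example (hypothetical device): match_device('/dev/sda1')
# returns the prefix/suffix pair ('/dev/sd', 'a'); a name that does not
# match the pattern returns None.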
def volume_in_mapping(mount_device, block_device_info):
block_device_list = [strip_dev(vol['mount_device'])
for vol in
driver.block_device_info_get_mapping(
block_device_info)]
swap = driver.block_device_info_get_swap(block_device_info)
if driver.swap_is_usable(swap):
block_device_list.append(strip_dev(swap['device_name']))
block_device_list += [strip_dev(ephemeral['device_name'])
for ephemeral in
driver.block_device_info_get_ephemerals(
block_device_info)]
LOG.debug("block_device_list %s", block_device_list)
return strip_dev(mount_device) in block_device_list
def get_bdm_ephemeral_disk_size(block_device_mappings):
return sum(bdm.get('volume_size', 0)
for bdm in block_device_mappings
if new_format_is_ephemeral(bdm))
def get_bdm_swap_list(block_device_mappings):
return [bdm for bdm in block_device_mappings
if new_format_is_swap(bdm)]
def get_bdm_local_disk_num(block_device_mappings):
return len([bdm for bdm in block_device_mappings
if bdm.get('destination_type') == 'local'])
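# Hedged usage sketch for instance_block_mapping (hypothetical instance
# dict with no BDMs; field names taken from the lookups above):
#   instance = {'root_device_name': '/dev/vda',
#               'default_swap_device': '/dev/vdb'}
#   instance_block_mapping(instance, [])
#   -> {'ami': 'vda', 'root': '/dev/vda', 'swap': '/dev/vdb'}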
| |
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Optimized NVP client for Quark
"""
import aiclib
from oslo_log import log as logging
from quark.db import models
from quark.drivers.nvp_driver import NVPDriver
import sqlalchemy as sa
from sqlalchemy import orm
LOG = logging.getLogger(__name__)
class OptimizedNVPDriver(NVPDriver):
def __init__(self):
super(OptimizedNVPDriver, self).__init__()
@classmethod
def get_name(klass):
return "NVP"
def delete_network(self, context, network_id):
lswitches = self._lswitches_for_network(context, network_id)
for switch in lswitches:
try:
self._lswitch_delete(context, switch.nvp_id)
except aiclib.core.AICException as ae:
LOG.info("LSwitch/Network %s found in database."
" Adding to orphaned database table."
% network_id)
if ae.code != 404:
LOG.info("LSwitch/Network %s was found in NVP."
" Adding to orpaned table for later cleanup."
" Code: %s, Message: %s"
% (network_id, ae.code, ae.message))
orphaned_lswitch = OrphanedLSwitch(
nvp_id=switch.nvp_id,
network_id=switch.network_id,
display_name=switch.display_name
)
context.session.add(orphaned_lswitch)
LOG.info("Deleting LSwitch/Network %s from original"
" table." % network_id)
context.session.delete(switch)
except Exception as e:
message = e.args[0] if e.args else ''
LOG.info("Failed to delete LSwitch/Network %s from "
" NVP (optimized). Message: %s"
% (network_id, message))
def create_port(self, context, network_id, port_id,
status=True, security_groups=None,
device_id="", **kwargs):
security_groups = security_groups or []
nvp_port = super(OptimizedNVPDriver, self).create_port(
context, network_id, port_id, status=status,
security_groups=security_groups, device_id=device_id)
switch_nvp_id = nvp_port["lswitch"]
# Slightly inefficient for the sake of brevity: let the parent
# class do its thing, then look up the switch the port was created
# on so we can record the association. The switch should be in the
# query cache, so the extra lookup ought to be cheap, but this is an
# easy optimization target later if needed.
switch = self._lswitch_select_by_nvp_id(context, switch_nvp_id)
new_port = LSwitchPort(port_id=nvp_port["uuid"],
switch_id=switch.id)
context.session.add(new_port)
switch.port_count = switch.port_count + 1
return nvp_port
def update_port(self, context, port_id, status=True,
security_groups=None, **kwargs):
security_groups = security_groups or []
nvp_port = super(OptimizedNVPDriver, self).update_port(
context, port_id, status=status,
security_groups=security_groups)
port = self._lport_select_by_id(context, port_id)
port.update(nvp_port)
def delete_port(self, context, port_id, **kwargs):
port = self._lport_select_by_id(context, port_id)
switch = port.switch
try:
self._lport_delete(context, port_id, switch)
except aiclib.core.AICException as ae:
LOG.info("LSwitchPort/Port %s found in database."
" Adding to orphaned database table."
% port_id)
if ae.code != 404:
LOG.info("LSwitchPort/Port %s was found in NVP."
" Adding to orpaned table for later cleanup."
" Code: %s, Message: %s"
% (port_id, ae.code, ae.args[0]))
orphaned_lswitch_port = OrphanedLSwitchPort(
port_id=port_id,
)
context.session.add(orphaned_lswitch_port)
except Exception as e:
LOG.info("Failed to delete LSwitchPort/Port %s from "
" NVP (optimized). Message: %s"
% (port_id, e.args[0]))
LOG.info("Deleting LSwitchPort/Port %s from original"
" table." % port_id)
context.session.delete(port)
switch.port_count = switch.port_count - 1
if switch.port_count == 0:
switches = self._lswitches_for_network(
context, switch.network_id)
if len(switches) > 1:
self._lswitch_delete(context, switch.nvp_id)
def _lport_delete(self, context, port_id, switch=None):
if switch is None:
port = self._lport_select_by_id(context, port_id)
switch = port.switch
super(OptimizedNVPDriver, self).delete_port(
context, port_id, lswitch_uuid=switch.nvp_id)
def create_security_group(self, context, group_name, **group):
nvp_group = super(OptimizedNVPDriver, self).create_security_group(
context, group_name, **group)
group_id = group.get('group_id')
profile = SecurityProfile(id=group_id, nvp_id=nvp_group['uuid'])
context.session.add(profile)
def delete_security_group(self, context, group_id, **kwargs):
super(OptimizedNVPDriver, self).delete_security_group(
context, group_id)
group = self._query_security_group(context, group_id)
context.session.delete(group)
def _lport_select_by_id(self, context, port_id):
query = context.session.query(LSwitchPort)
query = query.filter(LSwitchPort.port_id == port_id)
return query.first()
def _lswitch_delete(self, context, lswitch_uuid):
switch = self._lswitch_select_by_nvp_id(context, lswitch_uuid)
super(OptimizedNVPDriver, self)._lswitch_delete(
context, lswitch_uuid)
context.session.delete(switch)
def _lswitch_select_by_nvp_id(self, context, nvp_id):
switch = context.session.query(LSwitch).filter(
LSwitch.nvp_id == nvp_id).first()
return switch
def _lswitch_select_first(self, context, network_id):
query = context.session.query(LSwitch)
query = query.filter(LSwitch.network_id == network_id)
return query.first()
def _lswitch_select_free(self, context, network_id):
query = context.session.query(LSwitch)
query = query.filter(LSwitch.port_count <
self.limits['max_ports_per_switch'])
query = query.filter(LSwitch.network_id == network_id)
switch = query.order_by(LSwitch.port_count).first()
return switch
def _lswitch_status_query(self, context, network_id):
"""Child implementation of lswitch_status_query.
Deliberately empty as we rely on _get_network_details to be more
efficient than we can be here.
"""
pass
def _lswitch_select_open(self, context, network_id=None, **kwargs):
if self.limits['max_ports_per_switch'] == 0:
switch = self._lswitch_select_first(context, network_id)
else:
switch = self._lswitch_select_free(context, network_id)
if switch:
return switch.nvp_id
LOG.debug("Could not find optimized switch")
def _get_network_details(self, context, network_id, switches):
name, phys_net, phys_type, segment_id = None, None, None, None
switch = self._lswitch_select_first(context, network_id)
if switch:
name = switch.display_name
phys_net = switch.transport_zone
phys_type = switch.transport_connector
segment_id = switch.segment_id
return dict(network_name=name, phys_net=phys_net,
phys_type=phys_type, segment_id=segment_id)
def _lswitch_create(self, context, network_name=None, tags=None,
network_id=None, **kwargs):
nvp_id = super(OptimizedNVPDriver, self)._lswitch_create(
context, network_name, tags, network_id, **kwargs)
return self._lswitch_create_optimized(context, network_name, nvp_id,
network_id, **kwargs).nvp_id
def _lswitch_create_optimized(self, context, network_name, nvp_id,
network_id, phys_net=None, phys_type=None,
segment_id=None):
new_switch = LSwitch(nvp_id=nvp_id, network_id=network_id,
port_count=0, transport_zone=phys_net,
transport_connector=phys_type,
display_name=network_name[:40],
segment_id=segment_id)
context.session.add(new_switch)
return new_switch
def _lswitches_for_network(self, context, network_id):
switches = context.session.query(LSwitch).filter(
LSwitch.network_id == network_id).all()
return switches
def _lswitch_from_port(self, context, port_id):
port = self._lport_select_by_id(context, port_id)
return port.switch.nvp_id
def _query_security_group(self, context, group_id):
return context.session.query(SecurityProfile).filter(
SecurityProfile.id == group_id).first()
def _make_security_rule_dict(self, rule):
res = {"port_range_min": rule.get("port_range_min"),
"port_range_max": rule.get("port_range_max"),
"protocol": rule.get("protocol"),
"ip_prefix": rule.get("remote_ip_prefix"),
"group_id": rule.get("remote_group_id"),
"ethertype": rule.get("ethertype")}
# iterate over a copy so keys can be dropped safely while looping
for key, value in list(res.items()):
if value is None:
res.pop(key)
return res
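# Illustrative input/output for the helper above (hypothetical rule dict):
#   _make_security_rule_dict({'protocol': 'tcp', 'port_range_min': 80,
#                             'port_range_max': 80, 'ethertype': 'IPv4'})
#   -> {'port_range_min': 80, 'port_range_max': 80,
#       'protocol': 'tcp', 'ethertype': 'IPv4'}
# keys whose value is None (here ip_prefix and group_id) are dropped.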
def _get_security_group(self, context, group_id):
group = context.session.query(models.SecurityGroup).filter(
models.SecurityGroup.id == group_id).first()
rulelist = {'ingress': [], 'egress': []}
for rule in group.rules:
rulelist[rule.direction].append(
self._make_security_rule_dict(rule))
return {'uuid': self._query_security_group(context, group_id).nvp_id,
'logical_port_ingress_rules': rulelist['ingress'],
'logical_port_egress_rules': rulelist['egress']}
def _check_rule_count_per_port(self, context, group_id):
ports = context.session.query(models.SecurityGroup).filter(
models.SecurityGroup.id == group_id).first().get('ports', [])
groups = (set(group.id for group in port.get('security_groups', []))
for port in ports)
return max(self._check_rule_count_for_groups(
context, (self._get_security_group(context, id) for id in g))
for g in groups)
class LSwitchPort(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_lswitchport"
port_id = sa.Column(sa.String(36), nullable=False, index=True)
switch_id = sa.Column(sa.String(36),
sa.ForeignKey("quark_nvp_driver_lswitch.id"),
nullable=False)
class LSwitch(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_lswitch"
nvp_id = sa.Column(sa.String(36), nullable=False, index=True)
network_id = sa.Column(sa.String(36), nullable=False, index=True)
display_name = sa.Column(sa.String(255))
port_count = sa.Column(sa.Integer())
ports = orm.relationship(LSwitchPort, backref='switch')
transport_zone = sa.Column(sa.String(36))
transport_connector = sa.Column(sa.String(20))
segment_id = sa.Column(sa.Integer())
class QOS(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_qos"
display_name = sa.Column(sa.String(255), nullable=False)
max_bandwidth_rate = sa.Column(sa.Integer(), nullable=False)
min_bandwidth_rate = sa.Column(sa.Integer(), nullable=False)
class SecurityProfile(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_driver_security_profile"
nvp_id = sa.Column(sa.String(36), nullable=False, index=True)
class OrphanedLSwitch(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_orphaned_lswitches"
nvp_id = sa.Column(sa.String(36), nullable=False, index=True)
network_id = sa.Column(sa.String(36), nullable=False, index=True)
display_name = sa.Column(sa.String(255), index=True)
class OrphanedLSwitchPort(models.BASEV2, models.HasId):
__tablename__ = "quark_nvp_orphaned_lswitch_ports"
port_id = sa.Column(sa.String(36), nullable=False, index=True)
| |
#!/usr/bin/env python
import argparse
import cPickle
import gzip
import bz2
import logging
import os
import numpy
import tables
from collections import Counter
from operator import add
from numpy.lib.stride_tricks import as_strided
parser = argparse.ArgumentParser(
description="""
This takes a list of .txt or .txt.gz files, counts the words, and
creates a dictionary (optionally limited in size). It uses this
dictionary to binarize the text into a numeric format (replacing OOV
words with 1) and to create n-grams of a fixed size (padding sentences
with 0 for the EOS and BOS markers as necessary). The n-gram data can
be split into a training and a validation set.
The n-grams are saved in HDF5 format, whereas the dictionary, word
counts and binarized text are all pickled Python objects.
""", formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("input", type=argparse.FileType('r'), nargs="+",
help="The input files")
parser.add_argument("-b", "--binarized-text", default='binarized_text.pkl',
help="the name of the pickled binarized text file")
parser.add_argument("-d", "--dictionary", default='vocab.pkl',
help="the name of the pickled binarized text file")
parser.add_argument("-r", "--reuse-dict", action="store_true",
help="Reuse the dictionary. May not work with --ngram")
parser.add_argument("-n", "--ngram", type=int, metavar="N",
help="create n-grams")
parser.add_argument("-v", "--vocab", type=int, metavar="N",
help="limit vocabulary size to this number, which must "
"include BOS/EOS and OOV markers")
parser.add_argument("-p", "--pickle", action="store_true",
help="pickle the text as a list of lists of ints")
parser.add_argument("-s", "--split", type=float, metavar="N",
help="create a validation set. If >= 1 take this many "
"samples for the validation set, if < 1, take this "
"fraction of the samples")
parser.add_argument("-o", "--overwrite", action="store_true",
help="overwrite earlier created files, also forces the "
"program not to reuse count files")
parser.add_argument("-e", "--each", action="store_true",
help="output files for each separate input file")
parser.add_argument("-c", "--count", action="store_true",
help="save the word counts")
def open_files():
base_filenames = []
for i, input_file in enumerate(args.input):
dirname, filename = os.path.split(input_file.name)
# note: str.rstrip() strips a set of characters, not a suffix, so
# slice the extension off instead of using rstrip
if filename.split(os.extsep)[-1] == 'gz':
base_filename = filename[:-len('.gz')]
elif filename.split(os.extsep)[-1] == 'bz2':
base_filename = filename[:-len('.bz2')]
else:
base_filename = filename
if base_filename.split(os.extsep)[-1] == 'txt':
base_filename = base_filename[:-len('.txt')]
if filename.split(os.extsep)[-1] == 'gz':
args.input[i] = gzip.GzipFile(input_file.name, input_file.mode,
9, input_file)
elif filename.split(os.extsep)[-1] == 'bz2':
args.input[i] = bz2.BZ2File(input_file.name, input_file.mode)
base_filenames.append(base_filename)
return base_filenames
def safe_pickle(obj, filename):
if os.path.isfile(filename) and not args.overwrite:
logger.warning("Not saving %s, already exists." % (filename))
else:
if os.path.isfile(filename):
logger.info("Overwriting %s." % filename)
else:
logger.info("Saving to %s." % filename)
with open(filename, 'wb') as f:
cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
def safe_hdf(array, name):
if os.path.isfile(name + '.hdf') and not args.overwrite:
logger.warning("Not saving %s, already exists." % (name + '.hdf'))
else:
if os.path.isfile(name + '.hdf'):
logger.info("Overwriting %s." % (name + '.hdf'))
else:
logger.info("Saving to %s." % (name + '.hdf'))
with tables.openFile(name + '.hdf', 'w') as f:
atom = tables.Atom.from_dtype(array.dtype)
filters = tables.Filters(complib='blosc', complevel=5)
ds = f.createCArray(f.root, name.replace('.', ''), atom,
array.shape, filters=filters)
ds[:] = array
def create_dictionary():
# Part I: Counting the words
counters = []
sentence_counts = []
for input_file, base_filename in zip(args.input, base_filenames):
count_filename = base_filename + '.count.pkl'
input_filename = os.path.basename(input_file.name)
if os.path.isfile(count_filename) and not args.overwrite:
logger.info("Loading word counts for %s from %s"
% (input_filename, count_filename))
with open(count_filename, 'rb') as f:
counter = cPickle.load(f)
sentence_count = sum([1 for line in input_file])
else:
logger.info("Counting words in %s" % input_filename)
counter = Counter()
sentence_count = 0
for line in input_file:
counter.update(line.strip().split(' '))
sentence_count += 1
counters.append(counter)
sentence_counts.append(sentence_count)
logger.info("%d unique words in %d sentences with a total of %d words."
% (len(counter), sentence_count, sum(counter.values())))
if args.each and args.count:
safe_pickle(counter, count_filename)
input_file.seek(0)
# Part II: Combining the counts
combined_counter = reduce(add, counters)
logger.info("Total: %d unique words in %d sentences with a total "
"of %d words."
% (len(combined_counter), sum(sentence_counts),
sum(combined_counter.values())))
if args.count:
safe_pickle(combined_counter, 'combined.count.pkl')
# Part III: Creating the dictionary
if args.vocab is not None:
if args.vocab <= 2:
logger.info('Building a dictionary with all unique words')
args.vocab = len(combined_counter) + 2
vocab_count = combined_counter.most_common(args.vocab - 2)
logger.info("Creating dictionary of %s most common words, covering "
"%2.1f%% of the text."
% (args.vocab,
100.0 * sum([count for word, count in vocab_count]) /
sum(combined_counter.values())))
else:
logger.info("Creating dictionary of all words")
# use the combined counts rather than the last file's counter
vocab_count = combined_counter.most_common()
vocab = {'UNK': 1, '<s>': 0, '</s>': 0}
for i, (word, count) in enumerate(vocab_count):
vocab[word] = i + 2
safe_pickle(vocab, args.dictionary)
return combined_counter, sentence_counts, counters, vocab
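# The resulting vocab maps '<s>' and '</s>' to 0, 'UNK' to 1, and the
# most common words to ids 2 and up, e.g. (illustrative):
#   {'<s>': 0, '</s>': 0, 'UNK': 1, 'the': 2, 'of': 3, ...}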
def binarize():
if args.ngram:
assert numpy.iinfo(numpy.uint16).max > len(vocab)
ngrams = numpy.empty((sum(combined_counter.values()) +
sum(sentence_counts), args.ngram),
dtype='uint16')
binarized_corpora = []
total_ngram_count = 0
for input_file, base_filename in \
zip(args.input, base_filenames):
input_filename = os.path.basename(input_file.name)
logger.info("Binarizing %s." % (input_filename))
binarized_corpus = []
ngram_count = 0
for sentence_count, sentence in enumerate(input_file):
words = sentence.strip().split(' ')
binarized_sentence = [vocab.get(word, 1) for word in words]
binarized_corpus.append(binarized_sentence)
if args.ngram:
padded_sentence = numpy.asarray(
[0] * (args.ngram - 1) + binarized_sentence + [0]
)
ngrams[total_ngram_count + ngram_count:
total_ngram_count + ngram_count + len(words) + 1] = \
as_strided(
padded_sentence,
shape=(len(words) + 1, args.ngram),
strides=(padded_sentence.itemsize,
padded_sentence.itemsize)
)
ngram_count += len(words) + 1
# endfor sentence in input_file
# Output
if args.each:
if args.pickle:
safe_pickle(binarized_corpus, base_filename + '.pkl')
if args.ngram and args.split:
if args.split >= 1:
rows = int(args.split)
else:
rows = int(ngram_count * args.split)
logger.info("Saving training set (%d samples) and validation "
"set (%d samples)."
% (ngram_count - rows, rows))
rows = numpy.random.choice(ngram_count, rows, replace=False)
safe_hdf(ngrams[total_ngram_count + rows],
base_filename + '_valid')
safe_hdf(
ngrams[total_ngram_count + numpy.setdiff1d(
numpy.arange(ngram_count),
rows, True
)], base_filename + '_train'
)
elif args.ngram:
logger.info("Saving n-grams to %s." % (base_filename + '.hdf'))
safe_hdf(ngrams, base_filename)
binarized_corpora += binarized_corpus
total_ngram_count += ngram_count
input_file.seek(0)
# endfor input_file in args.input
if args.pickle:
safe_pickle(binarized_corpora, args.binarized_text)
if args.ngram and args.split:
if args.split >= 1:
rows = int(args.split)
else:
rows = int(total_ngram_count * args.split)
logger.info("Saving training set (%d samples) and validation set (%d "
"samples)."
% (total_ngram_count - rows, rows))
rows = numpy.random.choice(total_ngram_count, rows, replace=False)
safe_hdf(ngrams[rows], 'combined_valid')
safe_hdf(ngrams[numpy.setdiff1d(numpy.arange(total_ngram_count),
rows, True)], 'combined_train')
elif args.ngram:
safe_hdf(ngrams, 'combined')
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('preprocess')
args = parser.parse_args()
base_filenames = open_files()
if not args.reuse_dict:
combined_counter, sentence_counts, counters, vocab = create_dictionary()
else:
with open(args.dictionary, 'rb') as f:
vocab = cPickle.load(f)
if args.ngram or args.pickle:
binarize()
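# Example invocation (illustrative; flags as defined by the parser above):
#   python preprocess.py -v 30000 -n 5 -s 0.1 -p \
#       -d vocab.pkl -b binarized_text.pkl train.txt.gz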
| |
import sys
import math
import array
from .utils import (
db_to_float,
ratio_to_db,
register_pydub_effect,
make_chunks,
audioop,
get_array_type,
get_min_max_value
)
from .silence import split_on_silence
from .exceptions import TooManyMissingFrames, InvalidDuration
if sys.version_info >= (3, 0):
xrange = range
@register_pydub_effect
def normalize(seg, headroom=0.1):
"""
headroom is how close to the maximum volume to boost the signal up to (specified in dB)
"""
peak_sample_val = seg.max
# if the max is 0, this audio segment is silent, and can't be normalized
if peak_sample_val == 0:
return seg
target_peak = seg.max_possible_amplitude * db_to_float(-headroom)
needed_boost = ratio_to_db(target_peak / peak_sample_val)
return seg.apply_gain(needed_boost)
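# Hedged usage sketch (register_pydub_effect exposes this as a method on
# AudioSegment; "input.wav" is a placeholder):
#   from pydub import AudioSegment
#   seg = AudioSegment.from_file("input.wav")
#   louder = seg.normalize(headroom=0.1)  # peak ends up 0.1 dB below max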
@register_pydub_effect
def speedup(seg, playback_speed=1.5, chunk_size=150, crossfade=25):
# we will keep audio in 150ms chunks since one waveform at 20Hz is 50ms long
# (20 Hz is the lowest frequency audible to humans)
# portion of AUDIO TO KEEP. if playback speed is 1.25 we keep 80% (0.8) and
# discard 20% (0.2)
atk = 1.0 / playback_speed
if playback_speed < 2.0:
# removing less than half the audio - remove ms_to_remove_per_chunk
# from each (chunk_size + ms_to_remove_per_chunk) ms window
ms_to_remove_per_chunk = int(chunk_size * (1 - atk) / atk)
else:
# removing half or more of the audio - keep chunk_size ms of each
# window and discard the rest
ms_to_remove_per_chunk = int(chunk_size)
chunk_size = int(atk * chunk_size / (1 - atk))
# the crossfade cannot be longer than the amount of audio we're removing
crossfade = min(crossfade, ms_to_remove_per_chunk - 1)
# DEBUG
#print("chunk: {0}, rm: {1}".format(chunk_size, ms_to_remove_per_chunk))
chunks = make_chunks(seg, chunk_size + ms_to_remove_per_chunk)
if len(chunks) < 2:
raise Exception("Could not speed up AudioSegment, it was too short {2:0.2f}s for the current settings:\n{0}ms chunks at {1:0.1f}x speedup".format(
chunk_size, playback_speed, seg.duration_seconds))
# we'll actually truncate a bit less than we calculated to make up for the
# crossfade between chunks
ms_to_remove_per_chunk -= crossfade
# we don't want to truncate the last chunk since it is not guaranteed to be
# the full chunk length
last_chunk = chunks[-1]
chunks = [chunk[:-ms_to_remove_per_chunk] for chunk in chunks[:-1]]
out = chunks[0]
for chunk in chunks[1:]:
out = out.append(chunk, crossfade=crossfade)
out += last_chunk
return out
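# e.g. (illustrative): play back 1.5x faster without changing pitch
#   faster = seg.speedup(playback_speed=1.5, chunk_size=150, crossfade=25)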
@register_pydub_effect
def strip_silence(seg, silence_len=1000, silence_thresh=-16, padding=100):
if padding > silence_len:
raise InvalidDuration("padding cannot be longer than silence_len")
chunks = split_on_silence(seg, silence_len, silence_thresh, padding)
crossfade = padding / 2
if not len(chunks):
return seg[0:0]
seg = chunks[0]
for chunk in chunks[1:]:
seg = seg.append(chunk, crossfade=crossfade)
return seg
@register_pydub_effect
def compress_dynamic_range(seg, threshold=-20.0, ratio=4.0, attack=5.0, release=50.0):
"""
Keyword Arguments:
threshold - default: -20.0
Threshold in dBFS. default of -20.0 means -20dB relative to the
maximum possible volume. 0dBFS is the maximum possible value so
all values for this argument should be negative.
ratio - default: 4.0
Compression ratio. Audio louder than the threshold will be
reduced to 1/ratio the volume. A ratio of 4.0 is equivalent to
a setting of 4:1 in a pro-audio compressor like the Waves C1.
attack - default: 5.0
Attack in milliseconds. How long it should take for the compressor
to kick in once the audio has exceeded the threshold.
release - default: 50.0
Release in milliseconds. How long it should take for the compressor
to stop compressing after the audio has fallen below the threshold.
For an overview of Dynamic Range Compression, and more detailed explanation
of the related terminology, see:
http://en.wikipedia.org/wiki/Dynamic_range_compression
"""
thresh_rms = seg.max_possible_amplitude * db_to_float(threshold)
look_frames = int(seg.frame_count(ms=attack))
def rms_at(frame_i):
return seg.get_sample_slice(frame_i - look_frames, frame_i).rms
def db_over_threshold(rms):
if rms == 0: return 0.0
db = ratio_to_db(rms / thresh_rms)
return max(db, 0)
output = []
# amount to reduce the volume of the audio by (in dB)
attenuation = 0.0
attack_frames = seg.frame_count(ms=attack)
release_frames = seg.frame_count(ms=release)
for i in xrange(int(seg.frame_count())):
rms_now = rms_at(i)
# with a ratio of 4.0 this means the volume will exceed the threshold by
# 1/4 the amount (of dB) that it would otherwise
max_attenuation = (1 - (1.0 / ratio)) * db_over_threshold(rms_now)
attenuation_inc = max_attenuation / attack_frames
attenuation_dec = max_attenuation / release_frames
if rms_now > thresh_rms and attenuation <= max_attenuation:
attenuation += attenuation_inc
attenuation = min(attenuation, max_attenuation)
else:
attenuation -= attenuation_dec
attenuation = max(attenuation, 0)
frame = seg.get_frame(i)
if attenuation != 0.0:
frame = audioop.mul(frame,
seg.sample_width,
db_to_float(-attenuation))
output.append(frame)
return seg._spawn(data=b''.join(output))
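# e.g. (illustrative): gentle 4:1 compression above -20 dBFS
#   compressed = seg.compress_dynamic_range(threshold=-20.0, ratio=4.0)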
# Invert the phase of the signal.
@register_pydub_effect
def invert_phase(seg):
inverted = audioop.mul(seg._data, seg.sample_width, -1.0)
return seg._spawn(data=inverted)
# High and low pass filters based on implementation found on Stack Overflow:
# http://stackoverflow.com/questions/13882038/implementing-simple-high-and-low-pass-filters-in-c
@register_pydub_effect
def low_pass_filter(seg, cutoff):
"""
cutoff - Frequency (in Hz) where higher frequency signal will begin to
be reduced by 6dB per octave (doubling in frequency) above this point
"""
RC = 1.0 / (cutoff * 2 * math.pi)
dt = 1.0 / seg.frame_rate
alpha = dt / (RC + dt)
array_type = get_array_type(seg.sample_width * 8)
original = array.array(array_type, seg._data)
filteredArray = array.array(array_type, original)
frame_count = int(seg.frame_count())
last_val = [0] * seg.channels
for i in range(seg.channels):
last_val[i] = filteredArray[i] = original[i]
for i in range(1, frame_count):
for j in range(seg.channels):
offset = (i * seg.channels) + j
last_val[j] = last_val[j] + (alpha * (original[offset] - last_val[j]))
filteredArray[offset] = int(last_val[j])
# array.tostring() was removed in Python 3.9; fall back for Python 2
data = (filteredArray.tobytes() if hasattr(filteredArray, 'tobytes')
else filteredArray.tostring())
return seg._spawn(data=data)
@register_pydub_effect
def high_pass_filter(seg, cutoff):
"""
cutoff - Frequency (in Hz) where lower frequency signal will begin to
be reduced by 6dB per octave (doubling in frequency) below this point
"""
RC = 1.0 / (cutoff * 2 * math.pi)
dt = 1.0 / seg.frame_rate
alpha = RC / (RC + dt)
array_type = get_array_type(seg.sample_width * 8)
minval, maxval = get_min_max_value(seg.sample_width * 8)
original = array.array(array_type, seg._data)
filteredArray = array.array(array_type, original)
frame_count = int(seg.frame_count())
last_val = [0] * seg.channels
for i in range(seg.channels):
last_val[i] = filteredArray[i] = original[i]
for i in range(1, frame_count):
for j in range(seg.channels):
offset = (i * seg.channels) + j
offset_minus_1 = ((i-1) * seg.channels) + j
last_val[j] = alpha * (last_val[j] + original[offset] - original[offset_minus_1])
filteredArray[offset] = int(min(max(last_val[j], minval), maxval))
# array.tostring() was removed in Python 3.9; fall back for Python 2
data = (filteredArray.tobytes() if hasattr(filteredArray, 'tobytes')
else filteredArray.tostring())
return seg._spawn(data=data)
@register_pydub_effect
def pan(seg, pan_amount):
"""
pan_amount should be between -1.0 (100% left) and +1.0 (100% right)
When pan_amount == 0.0 the left/right balance is not changed.
Panning does not alter the *perceived* loudness, but since loudness
is decreasing on one side, the other side needs to get louder to
compensate. When panned hard left, the left channel will be 3dB louder.
"""
if not -1.0 <= pan_amount <= 1.0:
raise ValueError("pan_amount should be between -1.0 (100% left) and +1.0 (100% right)")
max_boost_db = ratio_to_db(2.0)
boost_db = abs(pan_amount) * max_boost_db
boost_factor = db_to_float(boost_db)
reduce_factor = db_to_float(max_boost_db) - boost_factor
reduce_db = ratio_to_db(reduce_factor)
# Cut boost in half (max boost == 3 dB) - in reality 2 speakers
# do not sum to a full 6 dB.
boost_db = boost_db / 2.0
if pan_amount < 0:
return seg.apply_gain_stereo(boost_db, reduce_db)
else:
return seg.apply_gain_stereo(reduce_db, boost_db)
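# e.g. (illustrative):
#   slightly_left = seg.pan(-0.3)
#   hard_right = seg.pan(+1.0)  # right boosted ~3 dB, left effectively silenced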
@register_pydub_effect
def apply_gain_stereo(seg, left_gain=0.0, right_gain=0.0):
"""
left_gain - amount of gain to apply to the left channel (in dB)
right_gain - amount of gain to apply to the right channel (in dB)
note: mono audio segments will be converted to stereo
"""
if seg.channels == 1:
left = right = seg
elif seg.channels == 2:
left, right = seg.split_to_mono()
l_mult_factor = db_to_float(left_gain)
r_mult_factor = db_to_float(right_gain)
left_data = audioop.mul(left._data, left.sample_width, l_mult_factor)
left_data = audioop.tostereo(left_data, left.sample_width, 1, 0)
right_data = audioop.mul(right._data, right.sample_width, r_mult_factor)
right_data = audioop.tostereo(right_data, right.sample_width, 0, 1)
output = audioop.add(left_data, right_data, seg.sample_width)
return seg._spawn(data=output,
overrides={'channels': 2,
'frame_width': 2 * seg.sample_width})
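# e.g. (illustrative): duck the left channel by 6 dB, leave the right as-is
#   quieter_left = seg.apply_gain_stereo(-6.0, 0.0)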
| |
# sql/coercions.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from __future__ import annotations
import collections.abc as collections_abc
import numbers
import re
import typing
from typing import Any
from typing import Any as TODO_Any
from typing import Optional
from typing import Type
from typing import TypeVar
from . import operators
from . import roles
from . import visitors
from .base import ExecutableOption
from .base import Options
from .cache_key import HasCacheKey
from .visitors import Visitable
from .. import exc
from .. import inspection
from .. import util
if not typing.TYPE_CHECKING:
elements = None
lambdas = None
schema = None
selectable = None
traversals = None
if typing.TYPE_CHECKING:
from . import elements
from . import lambdas
from . import schema
from . import selectable
from . import traversals
from .elements import ClauseElement
_SR = TypeVar("_SR", bound=roles.SQLRole)
_StringOnlyR = TypeVar("_StringOnlyR", bound=roles.StringRole)
def _is_literal(element):
"""Return whether or not the element is a "literal" in the context
of a SQL expression construct.
"""
return (
not isinstance(
element,
(Visitable, schema.SchemaEventTarget),
)
and not hasattr(element, "__clause_element__")
)
def _deep_is_literal(element):
"""Return whether or not the element is a "literal" in the context
of a SQL expression construct.
Does a deeper, more esoteric check than _is_literal. It is used
for lambda elements that have to distinguish values that would
be bound vs. not, without any context.
"""
if isinstance(element, collections_abc.Sequence) and not isinstance(
element, str
):
for elem in element:
if not _deep_is_literal(elem):
return False
else:
return True
return (
not isinstance(
element,
(
Visitable,
schema.SchemaEventTarget,
HasCacheKey,
Options,
util.langhelpers._symbol,
),
)
and not hasattr(element, "__clause_element__")
and (
not isinstance(element, type)
or not issubclass(element, HasCacheKey)
)
)
def _document_text_coercion(paramname, meth_rst, param_rst):
return util.add_parameter_text(
paramname,
(
".. warning:: "
"The %s argument to %s can be passed as a Python string argument, "
"which will be treated "
"as **trusted SQL text** and rendered as given. **DO NOT PASS "
"UNTRUSTED INPUT TO THIS PARAMETER**."
)
% (param_rst, meth_rst),
)
def _expression_collection_was_a_list(attrname, fnname, args):
if args and isinstance(args[0], (list, set, dict)) and len(args) == 1:
if isinstance(args[0], list):
raise exc.ArgumentError(
f'The "{attrname}" argument to {fnname}(), when '
"referring to a sequence "
"of items, is now passed as a series of positional "
"elements, rather than as a list. "
)
return args[0]
return args
# TODO: would like to have overloads here; however, mypy is being
# extremely pedantic about them. Not sure why pylance is OK with them.
def expect(
role: Type[_SR],
element: Any,
*,
apply_propagate_attrs: Optional["ClauseElement"] = None,
argname: Optional[str] = None,
post_inspect: bool = False,
**kw: Any,
) -> TODO_Any:
if (
role.allows_lambda
# note callable() will not invoke a __getattr__() method, whereas
# hasattr(obj, "__call__") will. by keeping the callable() check here
# we prevent most needless calls to hasattr() and therefore
# __getattr__(), which is present on ColumnElement.
and callable(element)
and hasattr(element, "__code__")
):
return lambdas.LambdaElement(
element,
role,
lambdas.LambdaOptions(**kw),
apply_propagate_attrs=apply_propagate_attrs,
)
# major case is that we are given a ClauseElement already, skip more
# elaborate logic up front if possible
impl = _impl_lookup[role]
original_element = element
if not isinstance(
element,
(elements.ClauseElement, schema.SchemaItem, schema.FetchedValue),
):
resolved = None
if impl._resolve_literal_only:
resolved = impl._literal_coercion(element, **kw)
else:
original_element = element
is_clause_element = False
# this is a special performance optimization for ORM
# joins used by JoinTargetImpl that we don't go through the
# work of creating __clause_element__() when we only need the
# original QueryableAttribute, as the former will do clause
# adaption and all that which is just thrown away here.
if (
impl._skip_clauseelement_for_target_match
and isinstance(element, role)
and hasattr(element, "__clause_element__")
):
is_clause_element = True
else:
while hasattr(element, "__clause_element__"):
is_clause_element = True
if not getattr(element, "is_clause_element", False):
element = element.__clause_element__()
else:
break
if not is_clause_element:
if impl._use_inspection:
insp = inspection.inspect(element, raiseerr=False)
if insp is not None:
if post_inspect:
insp._post_inspect
try:
resolved = insp.__clause_element__()
except AttributeError:
impl._raise_for_expected(original_element, argname)
if resolved is None:
resolved = impl._literal_coercion(
element, argname=argname, **kw
)
else:
resolved = element
else:
resolved = element
if (
apply_propagate_attrs is not None
and not apply_propagate_attrs._propagate_attrs
and resolved._propagate_attrs
):
apply_propagate_attrs._propagate_attrs = resolved._propagate_attrs
if impl._role_class in resolved.__class__.__mro__:
if impl._post_coercion:
resolved = impl._post_coercion(
resolved,
argname=argname,
original_element=original_element,
**kw,
)
return resolved
else:
return impl._implicit_coercions(
original_element, resolved, argname=argname, **kw
)
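# Hedged example of how call sites use expect() (illustrative; mirrors
# typical Core usage, coercing a plain Python value into a bound
# parameter for an expression role):
#   from sqlalchemy.sql import coercions, roles
#   expr = coercions.expect(roles.ExpressionElementRole, 5)
#   # -> a BindParameter wrapping the literal 5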
def expect_as_key(role, element, **kw):
kw["as_key"] = True
return expect(role, element, **kw)
def expect_col_expression_collection(role, expressions):
for expr in expressions:
strname = None
column = None
resolved = expect(role, expr)
if isinstance(resolved, str):
strname = resolved = expr
else:
cols = []
visitors.traverse(resolved, {}, {"column": cols.append})
if cols:
column = cols[0]
add_element = column if column is not None else strname
yield resolved, column, strname, add_element
class RoleImpl:
__slots__ = ("_role_class", "name", "_use_inspection")
def _literal_coercion(self, element, **kw):
raise NotImplementedError()
_post_coercion = None
_resolve_literal_only = False
_skip_clauseelement_for_target_match = False
def __init__(self, role_class):
self._role_class = role_class
self.name = role_class._role_name
self._use_inspection = issubclass(role_class, roles.UsesInspection)
def _implicit_coercions(
self, element, resolved, argname=None, **kw
) -> Any:
self._raise_for_expected(element, argname, resolved)
def _raise_for_expected(
self,
element,
argname=None,
resolved=None,
advice=None,
code=None,
err=None,
):
if resolved is not None and resolved is not element:
got = "%r object resolved from %r object" % (resolved, element)
else:
got = repr(element)
if argname:
msg = "%s expected for argument %r; got %s." % (
self.name,
argname,
got,
)
else:
msg = "%s expected, got %s." % (self.name, got)
if advice:
msg += " " + advice
raise exc.ArgumentError(msg, code=code) from err
class _Deannotate:
__slots__ = ()
def _post_coercion(self, resolved, **kw):
from .util import _deep_deannotate
return _deep_deannotate(resolved)
class _StringOnly:
__slots__ = ()
_resolve_literal_only = True
class _ReturnsStringKey:
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(original_element, str):
return original_element
else:
self._raise_for_expected(original_element, argname, resolved)
def _literal_coercion(self, element, **kw):
return element
class _ColumnCoercions:
__slots__ = ()
def _warn_for_scalar_subquery_coercion(self):
util.warn(
"implicitly coercing SELECT object to scalar subquery; "
"please use the .scalar_subquery() method to produce a scalar "
"subquery.",
)
def _implicit_coercions(self, element, resolved, argname=None, **kw):
original_element = element
if not getattr(resolved, "is_clause_element", False):
self._raise_for_expected(original_element, argname, resolved)
elif resolved._is_select_statement:
self._warn_for_scalar_subquery_coercion()
return resolved.scalar_subquery()
elif resolved._is_from_clause and isinstance(
resolved, selectable.Subquery
):
self._warn_for_scalar_subquery_coercion()
return resolved.element.scalar_subquery()
elif self._role_class.allows_lambda and resolved._is_lambda_element:
return resolved
else:
self._raise_for_expected(original_element, argname, resolved)
def _no_text_coercion(
element, argname=None, exc_cls=exc.ArgumentError, extra=None, err=None
):
raise exc_cls(
"%(extra)sTextual SQL expression %(expr)r %(argname)sshould be "
"explicitly declared as text(%(expr)r)"
% {
"expr": util.ellipses_string(element),
"argname": "for argument %s" % (argname,) if argname else "",
"extra": "%s " % extra if extra else "",
}
) from err
class _NoTextCoercion:
__slots__ = ()
def _literal_coercion(self, element, argname=None, **kw):
if isinstance(element, str) and issubclass(
elements.TextClause, self._role_class
):
_no_text_coercion(element, argname)
else:
self._raise_for_expected(element, argname)
class _CoerceLiterals:
__slots__ = ()
_coerce_consts = False
_coerce_star = False
_coerce_numerics = False
def _text_coercion(self, element, argname=None):
return _no_text_coercion(element, argname)
def _literal_coercion(self, element, argname=None, **kw):
if isinstance(element, str):
if self._coerce_star and element == "*":
return elements.ColumnClause("*", is_literal=True)
else:
return self._text_coercion(element, argname, **kw)
if self._coerce_consts:
if element is None:
return elements.Null()
elif element is False:
return elements.False_()
elif element is True:
return elements.True_()
if self._coerce_numerics and isinstance(element, (numbers.Number)):
return elements.ColumnClause(str(element), is_literal=True)
self._raise_for_expected(element, argname)
class LiteralValueImpl(RoleImpl):
_resolve_literal_only = True
def _implicit_coercions(
self, element, resolved, argname, type_=None, **kw
):
if not _is_literal(resolved):
self._raise_for_expected(
element, resolved=resolved, argname=argname, **kw
)
return elements.BindParameter(None, element, type_=type_, unique=True)
def _literal_coercion(self, element, argname=None, type_=None, **kw):
return element
class _SelectIsNotFrom:
__slots__ = ()
def _raise_for_expected(
self, element, argname=None, resolved=None, advice=None, **kw
):
if (
not advice
and isinstance(element, roles.SelectStatementRole)
or isinstance(resolved, roles.SelectStatementRole)
):
advice = (
"To create a "
"FROM clause from a %s object, use the .subquery() method."
% (resolved.__class__ if resolved is not None else element,)
)
code = "89ve"
else:
code = None
return super(_SelectIsNotFrom, self)._raise_for_expected(
element,
argname=argname,
resolved=resolved,
advice=advice,
code=code,
**kw,
)
class HasCacheKeyImpl(RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(original_element, traversals.HasCacheKey):
return original_element
else:
self._raise_for_expected(original_element, argname, resolved)
def _literal_coercion(self, element, **kw):
return element
class ExecutableOptionImpl(RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(original_element, ExecutableOption):
return original_element
else:
self._raise_for_expected(original_element, argname, resolved)
def _literal_coercion(self, element, **kw):
return element
class ExpressionElementImpl(_ColumnCoercions, RoleImpl):
__slots__ = ()
def _literal_coercion(
self, element, name=None, type_=None, argname=None, is_crud=False, **kw
):
if (
element is None
and not is_crud
and (type_ is None or not type_.should_evaluate_none)
):
# TODO: there's no test coverage now for the
# "should_evaluate_none" part of this, as outside of "crud" this
# codepath is not normally used except in some special cases
return elements.Null()
else:
try:
return elements.BindParameter(
name, element, type_, unique=True, _is_crud=is_crud
)
except exc.ArgumentError as err:
self._raise_for_expected(element, err=err)
def _raise_for_expected(self, element, argname=None, resolved=None, **kw):
if isinstance(element, roles.AnonymizedFromClauseRole):
advice = (
"To create a "
"column expression from a FROM clause row "
"as a whole, use the .table_valued() method."
)
else:
advice = None
return super(ExpressionElementImpl, self)._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
class BinaryElementImpl(ExpressionElementImpl, RoleImpl):
__slots__ = ()
def _literal_coercion(
self, element, expr, operator, bindparam_type=None, argname=None, **kw
):
try:
return expr._bind_param(operator, element, type_=bindparam_type)
except exc.ArgumentError as err:
self._raise_for_expected(element, err=err)
def _post_coercion(self, resolved, expr, **kw):
if resolved.type._isnull and not expr.type._isnull:
resolved = resolved._with_binary_element_type(expr.type)
return resolved
class InElementImpl(RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if resolved._is_from_clause:
if (
isinstance(resolved, selectable.Alias)
and resolved.element._is_select_statement
):
self._warn_for_implicit_coercion(resolved)
return self._post_coercion(resolved.element, **kw)
else:
self._warn_for_implicit_coercion(resolved)
return self._post_coercion(resolved.select(), **kw)
else:
self._raise_for_expected(original_element, argname, resolved)
def _warn_for_implicit_coercion(self, elem):
util.warn(
"Coercing %s object into a select() for use in IN(); "
"please pass a select() construct explicitly"
% (elem.__class__.__name__)
)
def _literal_coercion(self, element, expr, operator, **kw):
if isinstance(element, collections_abc.Iterable) and not isinstance(
element, str
):
non_literal_expressions = {}
element = list(element)
for o in element:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
self._raise_for_expected(element, **kw)
else:
non_literal_expressions[o] = o
elif o is None:
non_literal_expressions[o] = elements.Null()
if non_literal_expressions:
return elements.ClauseList(
*[
non_literal_expressions[o]
if o in non_literal_expressions
else expr._bind_param(operator, o)
for o in element
]
)
else:
return expr._bind_param(operator, element, expanding=True)
else:
self._raise_for_expected(element, **kw)
def _post_coercion(self, element, expr, operator, **kw):
if element._is_select_statement:
# for IN, we are doing scalar_subquery() coercion without
# a warning
return element.scalar_subquery()
elif isinstance(element, elements.ClauseList):
assert not len(element.clauses) == 0
return element.self_group(against=operator)
elif isinstance(element, elements.BindParameter):
element = element._clone(maintain_key=True)
element.expanding = True
element.expand_op = operator
return element
else:
return element
class OnClauseImpl(_ColumnCoercions, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _literal_coercion(
self, element, name=None, type_=None, argname=None, is_crud=False, **kw
):
self._raise_for_expected(element)
def _post_coercion(self, resolved, original_element=None, **kw):
# this is a hack right now as we want to use coercion on an
# ORM InstrumentedAttribute, but we want to return the object
# itself if it is one, not its clause element.
# ORM context _join and _legacy_join() would need to be improved
# to look for annotations in a clause element form.
if isinstance(original_element, roles.JoinTargetRole):
return original_element
return resolved
class WhereHavingImpl(_CoerceLiterals, _ColumnCoercions, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
return _no_text_coercion(element, argname)
class StatementOptionImpl(_CoerceLiterals, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
return elements.TextClause(element)
class ColumnArgumentImpl(_NoTextCoercion, RoleImpl):
__slots__ = ()
class ColumnArgumentOrKeyImpl(_ReturnsStringKey, RoleImpl):
__slots__ = ()
class StrAsPlainColumnImpl(_CoerceLiterals, RoleImpl):
__slots__ = ()
def _text_coercion(self, element, argname=None):
return elements.ColumnClause(element)
class ByOfImpl(_CoerceLiterals, _ColumnCoercions, RoleImpl, roles.ByOfRole):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
return elements._textual_label_reference(element)
class OrderByImpl(ByOfImpl, RoleImpl):
__slots__ = ()
def _post_coercion(self, resolved, **kw):
if (
isinstance(resolved, self._role_class)
and resolved._order_by_label_element is not None
):
return elements._label_reference(resolved)
else:
return resolved
class GroupByImpl(ByOfImpl, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(resolved, roles.StrictFromClauseRole):
return elements.ClauseList(*resolved.c)
else:
return resolved
class DMLColumnImpl(_ReturnsStringKey, RoleImpl):
__slots__ = ()
def _post_coercion(self, element, as_key=False, **kw):
if as_key:
return element.key
else:
return element
class ConstExprImpl(RoleImpl):
__slots__ = ()
def _literal_coercion(self, element, argname=None, **kw):
if element is None:
return elements.Null()
elif element is False:
return elements.False_()
elif element is True:
return elements.True_()
else:
self._raise_for_expected(element, argname)
class TruncatedLabelImpl(_StringOnly, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(original_element, str):
return resolved
else:
self._raise_for_expected(original_element, argname, resolved)
def _literal_coercion(self, element, argname=None, **kw):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(element, elements._truncated_label):
return element
else:
return elements._truncated_label(element)
class DDLExpressionImpl(_Deannotate, _CoerceLiterals, RoleImpl):
__slots__ = ()
_coerce_consts = True
def _text_coercion(self, element, argname=None):
# see #5754 for why we can't easily deprecate this coercion.
# essentially expressions like postgresql_where would have to be
# text() as they come back from reflection and we don't want to
# have text() elements wired into the inspection dictionaries.
return elements.TextClause(element)
class DDLConstraintColumnImpl(_Deannotate, _ReturnsStringKey, RoleImpl):
__slots__ = ()
class DDLReferredColumnImpl(DDLConstraintColumnImpl):
__slots__ = ()
class LimitOffsetImpl(RoleImpl):
__slots__ = ()
def _implicit_coercions(self, element, resolved, argname=None, **kw):
if resolved is None:
return None
else:
self._raise_for_expected(element, argname, resolved)
def _literal_coercion(self, element, name, type_, **kw):
if element is None:
return None
else:
value = util.asint(element)
return selectable._OffsetLimitParam(
name, value, type_=type_, unique=True
)
class LabeledColumnExprImpl(ExpressionElementImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if isinstance(resolved, roles.ExpressionElementRole):
return resolved.label(None)
else:
new = super(LabeledColumnExprImpl, self)._implicit_coercions(
original_element, resolved, argname=argname, **kw
)
if isinstance(new, roles.ExpressionElementRole):
return new.label(None)
else:
self._raise_for_expected(original_element, argname, resolved)
class ColumnsClauseImpl(_SelectIsNotFrom, _CoerceLiterals, RoleImpl):
__slots__ = ()
_coerce_consts = True
_coerce_numerics = True
_coerce_star = True
_guess_straight_column = re.compile(r"^\w\S*$", re.I)
def _raise_for_expected(
self, element, argname=None, resolved=None, advice=None, **kw
):
if not advice and isinstance(element, list):
advice = (
f"Did you mean to say select("
f"{', '.join(repr(e) for e in element)})?"
)
return super(ColumnsClauseImpl, self)._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
def _text_coercion(self, element, argname=None):
element = str(element)
guess_is_literal = not self._guess_straight_column.match(element)
raise exc.ArgumentError(
"Textual column expression %(column)r %(argname)sshould be "
"explicitly declared with text(%(column)r), "
"or use %(literal_column)s(%(column)r) "
"for more specificity"
% {
"column": util.ellipses_string(element),
"argname": "for argument %s" % (argname,) if argname else "",
"literal_column": "literal_column"
if guess_is_literal
else "column",
}
)
class ReturnsRowsImpl(RoleImpl):
__slots__ = ()
class StatementImpl(_CoerceLiterals, RoleImpl):
__slots__ = ()
def _post_coercion(self, resolved, original_element, argname=None, **kw):
if resolved is not original_element and not isinstance(
original_element, str
):
# use same method as Connection uses; this will later raise
# ObjectNotExecutableError
try:
original_element._execute_on_connection
except AttributeError:
util.warn_deprecated(
"Object %r should not be used directly in a SQL statement "
"context, such as passing to methods such as "
"session.execute(). This usage will be disallowed in a "
"future release. "
"Please use Core select() / update() / delete() etc. "
"with Session.execute() and other statement execution "
"methods." % original_element,
"1.4",
)
return resolved
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if resolved._is_lambda_element:
return resolved
else:
return super(StatementImpl, self)._implicit_coercions(
original_element, resolved, argname=argname, **kw
)
class SelectStatementImpl(_NoTextCoercion, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if resolved._is_text_clause:
return resolved.columns()
else:
self._raise_for_expected(original_element, argname, resolved)
class HasCTEImpl(ReturnsRowsImpl):
__slots__ = ()
class IsCTEImpl(RoleImpl):
__slots__ = ()
class JoinTargetImpl(RoleImpl):
__slots__ = ()
_skip_clauseelement_for_target_match = True
def _literal_coercion(self, element, argname=None, **kw):
self._raise_for_expected(element, argname)
def _implicit_coercions(
self, original_element, resolved, argname=None, legacy=False, **kw
):
if isinstance(original_element, roles.JoinTargetRole):
# note that this codepath no longer occurs as of
# #6550, unless JoinTargetImpl._skip_clauseelement_for_target_match
# were set to False.
return original_element
elif legacy and resolved._is_select_statement:
util.warn_deprecated(
"Implicit coercion of SELECT and textual SELECT "
"constructs into FROM clauses is deprecated; please call "
".subquery() on any Core select or ORM Query object in "
"order to produce a subquery object.",
version="1.4",
)
# TODO: doing _implicit_subquery here causes tests to fail,
# how was this working before? probably that ORM
# join logic treated it as a select and subquery would happen
# in _ORMJoin->Join
return resolved
else:
self._raise_for_expected(original_element, argname, resolved)
class FromClauseImpl(_SelectIsNotFrom, _NoTextCoercion, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self,
original_element,
resolved,
argname=None,
explicit_subquery=False,
allow_select=True,
**kw,
):
if resolved._is_select_statement:
if explicit_subquery:
return resolved.subquery()
elif allow_select:
util.warn_deprecated(
"Implicit coercion of SELECT and textual SELECT "
"constructs into FROM clauses is deprecated; please call "
".subquery() on any Core select or ORM Query object in "
"order to produce a subquery object.",
version="1.4",
)
return resolved._implicit_subquery
elif resolved._is_text_clause:
return resolved
else:
self._raise_for_expected(original_element, argname, resolved)
def _post_coercion(self, element, deannotate=False, **kw):
if deannotate:
return element._deannotate()
else:
return element
class StrictFromClauseImpl(FromClauseImpl):
__slots__ = ()
def _implicit_coercions(
self,
original_element,
resolved,
argname=None,
allow_select=False,
**kw,
):
if resolved._is_select_statement and allow_select:
util.warn_deprecated(
"Implicit coercion of SELECT and textual SELECT constructs "
"into FROM clauses is deprecated; please call .subquery() "
"on any Core select or ORM Query object in order to produce a "
"subquery object.",
version="1.4",
)
return resolved._implicit_subquery
else:
self._raise_for_expected(original_element, argname, resolved)
class AnonymizedFromClauseImpl(StrictFromClauseImpl):
__slots__ = ()
def _post_coercion(self, element, flat=False, name=None, **kw):
assert name is None
return element._anonymous_fromclause(flat=flat)
class DMLTableImpl(_SelectIsNotFrom, _NoTextCoercion, RoleImpl):
__slots__ = ()
def _post_coercion(self, element, **kw):
if "dml_table" in element._annotations:
return element._annotations["dml_table"]
else:
return element
class DMLSelectImpl(_NoTextCoercion, RoleImpl):
__slots__ = ()
def _implicit_coercions(
self, original_element, resolved, argname=None, **kw
):
if resolved._is_from_clause:
if (
isinstance(resolved, selectable.Alias)
and resolved.element._is_select_statement
):
return resolved.element
else:
return resolved.select()
else:
self._raise_for_expected(original_element, argname, resolved)
class CompoundElementImpl(_NoTextCoercion, RoleImpl):
__slots__ = ()
def _raise_for_expected(self, element, argname=None, resolved=None, **kw):
if isinstance(element, roles.FromClauseRole):
if element._is_subquery:
advice = (
"Use the plain select() object without "
"calling .subquery() or .alias()."
)
else:
advice = (
"To SELECT from any FROM clause, use the .select() method."
)
else:
advice = None
return super(CompoundElementImpl, self)._raise_for_expected(
element, argname=argname, resolved=resolved, advice=advice, **kw
)
_impl_lookup = {}
for name in dir(roles):
cls = getattr(roles, name)
if name.endswith("Role"):
name = name.replace("Role", "Impl")
if name in globals():
impl = globals()[name](cls)
_impl_lookup[cls] = impl
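# After this loop, each *Role class in .roles is mapped to an instance of
# the matching *Impl defined above, e.g. (illustrative):
#   _impl_lookup[roles.ExpressionElementRole]  # -> ExpressionElementImpl
#   _impl_lookup[roles.StatementRole]          # -> StatementImpl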
| |
#!/usr/bin/env python
# Copyright 2014 Stanford University and Los Alamos National Security, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###
### Leaf Task Analysis
###
### Leaf task analysis computes, for each task, whether that task can
### be declared as a leaf task or not. Leaf tasks must not make any
### calls into the Legion runtime. At the language level, this means
### leaf tasks must avoid:
###
### * creating regions, index spaces, arrays, or partitions
### * calling tasks
### * downregion (generates a call to safe_cast)
### * new (generates a call to create_index_allocator)
###
### When aggressive leaf task optimization is enabled, this pass
### allows the following features in leaf tasks, despite being
### potentially unsafe. The user must disable debug mode when using
### this optimization, in order to avoid assertion failures from
### illegal runtime calls.
###
### * for (generates a call to get_index_space_domain)
###
# Backport of singledispatch to Python 2.x.
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
import copy
from . import ast, types
class Context:
def __init__(self, opts, type_map):
self.opts = opts
self.type_map = type_map
self.leaf_tasks = {}
# Returns true if a function is safe to call in the context of a leaf
# task. Functions are considered safe if:
#
# * The function is a foreign function.
# * The function does not take a ForeignContext parameter.
def is_function_safe(function_type, cx):
if not types.is_foreign_function(function_type):
return False
for arg in function_type.foreign_param_types:
if types.is_foreign_context(arg):
return False
return True
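# Illustrative outcomes (hypothetical function types):
#   foreign function with params (int, float)        -> True (safe)
#   foreign function taking a ForeignContext param   -> False
#   ordinary (non-foreign) task or function          -> False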
@singledispatch
def leaf_task_analysis_node(node, cx):
raise Exception('Leaf task analysis failed at %s' % node)
@leaf_task_analysis_node.register(ast.Program)
def _(node, cx):
leaf_task_analysis_node(node.definitions, cx)
return
@leaf_task_analysis_node.register(ast.Definitions)
def _(node, cx):
for definition in node.definitions:
leaf_task_analysis_node(definition, cx)
return
@leaf_task_analysis_node.register(ast.Import)
def _(node, cx):
return
@leaf_task_analysis_node.register(ast.Struct)
def _(node, cx):
return
@leaf_task_analysis_node.register(ast.Function)
def _(node, cx):
leaf_task = leaf_task_analysis_node(node.block, cx)
assert leaf_task in (True, False)
cx.leaf_tasks[node] = leaf_task
return
@leaf_task_analysis_node.register(ast.Block)
def _(node, cx):
return all(
leaf_task_analysis_node(expr, cx)
for expr in node.block)
@leaf_task_analysis_node.register(ast.StatementAssert)
def _(node, cx):
return leaf_task_analysis_node(node.expr, cx)
@leaf_task_analysis_node.register(ast.StatementExpr)
def _(node, cx):
return leaf_task_analysis_node(node.expr, cx)
@leaf_task_analysis_node.register(ast.StatementIf)
def _(node, cx):
return (
leaf_task_analysis_node(node.condition, cx) and
leaf_task_analysis_node(node.then_block, cx) and
(leaf_task_analysis_node(node.else_block, cx)
if node.else_block is not None else True))
@leaf_task_analysis_node.register(ast.StatementFor)
def _(node, cx):
if not cx.opts.leaf_task_optimization:
return False
return leaf_task_analysis_node(node.block, cx)
@leaf_task_analysis_node.register(ast.StatementLet)
def _(node, cx):
return leaf_task_analysis_node(node.expr, cx)
@leaf_task_analysis_node.register(ast.StatementLetRegion)
def _(node, cx):
return False
@leaf_task_analysis_node.register(ast.StatementLetArray)
def _(node, cx):
return False
@leaf_task_analysis_node.register(ast.StatementLetIspace)
def _(node, cx):
return False
@leaf_task_analysis_node.register(ast.StatementLetPartition)
def _(node, cx):
return False
@leaf_task_analysis_node.register(ast.StatementReturn)
def _(node, cx):
return leaf_task_analysis_node(node.expr, cx)
@leaf_task_analysis_node.register(ast.StatementUnpack)
def _(node, cx):
return leaf_task_analysis_node(node.expr, cx)
@leaf_task_analysis_node.register(ast.StatementVar)
def _(node, cx):
return leaf_task_analysis_node(node.expr, cx)
@leaf_task_analysis_node.register(ast.StatementWhile)
def _(node, cx):
return (
leaf_task_analysis_node(node.condition, cx) and
leaf_task_analysis_node(node.block, cx))
@leaf_task_analysis_node.register(ast.ExprID)
def _(node, cx):
return True
@leaf_task_analysis_node.register(ast.ExprAssignment)
def _(node, cx):
return (
leaf_task_analysis_node(node.lval, cx) and
leaf_task_analysis_node(node.rval, cx))
@leaf_task_analysis_node.register(ast.ExprUnaryOp)
def _(node, cx):
return leaf_task_analysis_node(node.arg, cx)
@leaf_task_analysis_node.register(ast.ExprBinaryOp)
def _(node, cx):
return (
leaf_task_analysis_node(node.lhs, cx) and
leaf_task_analysis_node(node.rhs, cx))
@leaf_task_analysis_node.register(ast.ExprReduceOp)
def _(node, cx):
return (
leaf_task_analysis_node(node.lhs, cx) and
leaf_task_analysis_node(node.rhs, cx))
@leaf_task_analysis_node.register(ast.ExprCast)
def _(node, cx):
return leaf_task_analysis_node(node.expr, cx)
@leaf_task_analysis_node.register(ast.ExprNull)
def _(node, cx):
return True
@leaf_task_analysis_node.register(ast.ExprIsnull)
def _(node, cx):
return leaf_task_analysis_node(node.pointer_expr, cx)
@leaf_task_analysis_node.register(ast.ExprNew)
def _(node, cx):
return False
@leaf_task_analysis_node.register(ast.ExprRead)
def _(node, cx):
return leaf_task_analysis_node(node.pointer_expr, cx)
@leaf_task_analysis_node.register(ast.ExprWrite)
def _(node, cx):
return (
leaf_task_analysis_node(node.pointer_expr, cx) and
leaf_task_analysis_node(node.value_expr, cx))
@leaf_task_analysis_node.register(ast.ExprReduce)
def _(node, cx):
return (
leaf_task_analysis_node(node.pointer_expr, cx) and
leaf_task_analysis_node(node.value_expr, cx))
@leaf_task_analysis_node.register(ast.ExprDereference)
def _(node, cx):
return leaf_task_analysis_node(node.pointer_expr, cx)
@leaf_task_analysis_node.register(ast.ExprArrayAccess)
def _(node, cx):
return (
leaf_task_analysis_node(node.array_expr, cx) and
leaf_task_analysis_node(node.index_expr, cx))
@leaf_task_analysis_node.register(ast.ExprFieldAccess)
def _(node, cx):
return leaf_task_analysis_node(node.struct_expr, cx)
@leaf_task_analysis_node.register(ast.ExprFieldDereference)
def _(node, cx):
return leaf_task_analysis_node(node.pointer_expr, cx)
@leaf_task_analysis_node.register(ast.ExprFieldValues)
def _(node, cx):
return leaf_task_analysis_node(node.field_values, cx)
@leaf_task_analysis_node.register(ast.FieldValues)
def _(node, cx):
return all(leaf_task_analysis_node(field_value, cx)
for field_value in node.field_values)
@leaf_task_analysis_node.register(ast.FieldValue)
def _(node, cx):
return leaf_task_analysis_node(node.value_expr, cx)
@leaf_task_analysis_node.register(ast.ExprFieldUpdates)
def _(node, cx):
return (
leaf_task_analysis_node(node.struct_expr, cx) and
leaf_task_analysis_node(node.field_updates, cx))
@leaf_task_analysis_node.register(ast.FieldUpdates)
def _(node, cx):
return all(leaf_task_analysis_node(field_update, cx)
for field_update in node.field_updates)
@leaf_task_analysis_node.register(ast.FieldUpdate)
def _(node, cx):
return leaf_task_analysis_node(node.update_expr, cx)
@leaf_task_analysis_node.register(ast.ExprColoring)
def _(node, cx):
return True
@leaf_task_analysis_node.register(ast.ExprColor)
def _(node, cx):
return (
leaf_task_analysis_node(node.coloring_expr, cx) and
leaf_task_analysis_node(node.pointer_expr, cx) and
leaf_task_analysis_node(node.color_expr, cx))
@leaf_task_analysis_node.register(ast.ExprUpregion)
def _(node, cx):
return leaf_task_analysis_node(node.expr, cx)
@leaf_task_analysis_node.register(ast.ExprDownregion)
def _(node, cx):
return False
@leaf_task_analysis_node.register(ast.ExprPack)
def _(node, cx):
return leaf_task_analysis_node(node.expr, cx)
@leaf_task_analysis_node.register(ast.ExprCall)
def _(node, cx):
if not is_function_safe(cx.type_map[node.function], cx):
return False
return leaf_task_analysis_node(node.args, cx)
@leaf_task_analysis_node.register(ast.Args)
def _(node, cx):
return all(leaf_task_analysis_node(arg, cx)
for arg in node.args)
@leaf_task_analysis_node.register(ast.ExprConstBool)
def _(node, cx):
return True
@leaf_task_analysis_node.register(ast.ExprConstDouble)
def _(node, cx):
return True
@leaf_task_analysis_node.register(ast.ExprConstFloat)
def _(node, cx):
return True
@leaf_task_analysis_node.register(ast.ExprConstInt)
def _(node, cx):
return True
@leaf_task_analysis_node.register(ast.ExprConstUInt)
def _(node, cx):
return True
def leaf_task_analysis(program, opts, type_map):
cx = Context(opts, type_map)
leaf_task_analysis_node(program, cx)
return cx.leaf_tasks
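# A minimal driver sketch (hypothetical; `program`, `opts` and `type_map`
# would come from the earlier parsing and type-checking passes):
#
#   leaf_tasks = leaf_task_analysis(program, opts, type_map)
#   for task_node, is_leaf in leaf_tasks.items():
#       print('%s: leaf=%s' % (task_node, is_leaf))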
| |
from distutils.version import StrictVersion
from rocker.restclient import Request, SocketError
import getopt
import json
import os
import pkg_resources
import sys
MIN_LABELS_VERSION = "1.17"
# Source: https://svn.blender.org/svnroot/bf-blender/trunk/blender/build_files/scons/tools/bcolors.py
# TODO Maybe use a library for coloring
class Col:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# rocker boilerplate class
class Rocker:
# Rocker constructor
#
# The URL can be either a UNIX socket or an HTTP/HTTPS server address, e.g.:
#
# - unix:///var/run/docker.sock <- that's the default value
# - http://localhost:1234/
# - https://localhost:1235/
#
# Notes:
# - There are no default ports for HTTP/S sockets.
# - HTTP/S URLs are only parsed for their host and port; the path and all
# other components are ignored.
# - UNIX socket URLs, conversely, ignore everything except the path part.
#def __init__(self, url = 'unix:///var/run/docker.sock'):
def __init__(self, url = None):
if url is None:
# use DOCKER_HOST env var or fallback to default
url = os.getenv('DOCKER_HOST')
if url is None:
url = 'unix:///var/run/docker.sock'
self._url = url
self._lastMsgId = None
self._duplicateIDs = set()
self._msgQueue = []
self._verbosity = 0
self._cachedDockerVersion = None
def checkApiVersion(self, minVersion, failMsg=False):
rc = StrictVersion(self.getDockerVersion()['ApiVersion']) >= StrictVersion(minVersion)
if failMsg and not rc:
self.error(failMsg)
return rc
# Returns a new RestClient instance pointing to the URL given in the constructor
def createRequest(self):
try:
return Request(self._url)
except SocketError as e:
# craft some docker-specific messages
if isinstance(e.cause, FileNotFoundError):
raise SocketError("Couldn't find Docker socket. Either docker is not running or listening somewhere other than '{0}'".format(self._url), e.cause)
elif isinstance(e.cause, PermissionError):
raise SocketError("Can't access Docker socket. Either rerun the command as root (e.g. via sudo) or add this user to the docker group.", e.cause)
else:
raise e
def getDockerVersion(self):
if self._cachedDockerVersion is None:
self._cachedDockerVersion = self.createRequest().doGet("/version").send().getObject()
return self._cachedDockerVersion
def getVerbosity(self):
return self._verbosity
def getopt(self):
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'v')
for opt,_ in opts:
if opt == '-v':
self._verbosity += 1
return args
except getopt.GetoptError as e:
self.error(e, exitCode=1)
def printDockerOutput(self, httpResponse):
while True:
chunk = httpResponse.readChunk()
if chunk is None:
break
chunk = json.loads(chunk)
self.printDockerMessage(chunk)
# Print Docker status messages (with color coding)
#
# Subsequent messages for the same image/container ID are printed on the same line (i.e. the previous message is overwritten)
def printDockerMessage(self, msgJson):
col = None
msg = None
newline = '\n'
if 'id' in msgJson:
# overwrite lines with the same ID (instead of printing a new one)
if self._lastMsgId == msgJson['id']:
# go back one line and clear it (ESC[1A = cursor up, ESC[K = erase to end of line)
sys.stdout.write('\033[1A\033[K')
# prepend ID
sys.stdout.write("{0}: ".format(msgJson['id']))
# color message depending on type
if 'error' in msgJson:
col = Col.FAIL
msg = msgJson['error']
newline=''
elif 'status' in msgJson:
col = Col.OKBLUE
msg = msgJson['status']
if 'progress' in msgJson:
msg = "{0} {1}".format(msgJson['status'], msgJson['progress'])
elif 'stream' in msgJson:
msg = msgJson['stream']
newline=''
else:
msg = ":: {0}".format(msgJson)
if col is not None:
msg = "{0}{1}{2}".format(col, msg, Col.ENDC)
sys.stdout.write("{0}{1}".format(msg, newline))
# update _lastMsgId
if 'id' in msgJson:
self._lastMsgId = msgJson['id']
else:
self._lastMsgId = None
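# Examples of the chunk shapes handled above (illustrative; the actual JSON
# comes from the Docker API):
#   {"id": "abc123", "status": "Downloading", "progress": "[==>   ] 12MB/40MB"}
#   {"stream": "Step 1/5 : FROM alpine\n"}
#   {"error": "manifest unknown"}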
def printQueuedMessages(self):
for msg, stream in self._msgQueue:
self._msg(msg, None, None, stream)
def printVersion(self):
# print our own version first (in case we can't connect to docker)
rockerInfo = pkg_resources.require("rocker")[0]
print("Rocker version: {v}".format(v=rockerInfo.version))
dockerInfo = self.getDockerVersion()
print("Docker version: {Version}".format(**dockerInfo))
self.debug(1, "Docker API version: {ApiVersion}".format(**dockerInfo))
self.debug(1, "Docker Kernel version: {KernelVersion}".format(**dockerInfo))
self.debug(2, "Docker GIT revision: {GitCommit}".format(**dockerInfo))
self.debug(2, "Docker GO version {GoVersion}".format(**dockerInfo))
def _msg(self, msg, col, duplicateId, stream, delayed=False):
if duplicateId is not None:
# don't print duplicate messages
if duplicateId in self._duplicateIDs:
return
else:
self._duplicateIDs.add(duplicateId)
if col is not None:
msg="{0}{1}{2}".format(col, msg, Col.ENDC)
if delayed:
self._msgQueue.append((msg, stream))
else:
stream.write("{0}\n".format(msg))
def error(self, msg: str, exitCode=1):
self._msg("ERROR: {0}".format(msg), Col.FAIL, None, sys.stderr)
if exitCode is not None:
sys.exit(exitCode)
def info(self, msg: str, duplicateId=None, stream=sys.stdout, delayed=False):
self._msg(msg, None, duplicateId, stream, delayed)
def warning(self, msg: str, duplicateId=None):
self._msg(msg, Col.WARNING, duplicateId, sys.stderr)
def debug(self, level, msg, duplicateId=None):
if self._verbosity < level:
return # too verbose
self._msg(msg, None, duplicateId, sys.stdout)
def choice(self, msg, options=['y', 'n'], default='y'):
rc = None
if default not in options:
raise KeyError("default ('{0}') not in options ({1})".format(default, options))
# construct choice string
choices = []
for o in options:
if o == default:
o = o.upper()
choices.append(o)
choices = "({0})".format('/'.join(choices))
while rc is None:
choice = input('{0} {1}: '.format(msg, choices)).lower()
if choice == '': #default
rc = default
elif choice in options:
rc = choice
return rc
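# Minimal usage sketch (hypothetical driver code, not part of this module;
# assumes a reachable Docker daemon):
#
#   rocker = Rocker()
#   args = rocker.getopt()
#   rocker.checkApiVersion(MIN_LABELS_VERSION,
#           "Docker API {0}+ required".format(MIN_LABELS_VERSION))
#   rocker.printVersion()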
| |
import datetime
from pymongo.errors import OperationFailure
try:
import unittest2 as unittest
except ImportError:
import unittest
from nose.plugins.skip import SkipTest
import pymongo
from bson.tz_util import utc
from mongoengine import (
connect, register_connection,
Document, DateTimeField
)
from mongoengine.python_support import IS_PYMONGO_3
import mongoengine.connection
from mongoengine.connection import (MongoEngineConnectionError, get_db,
get_connection)
def get_tz_awareness(connection):
if not IS_PYMONGO_3:
return connection.tz_aware
else:
return connection.codec_options.tz_aware
class ConnectionTest(unittest.TestCase):
def tearDown(self):
mongoengine.connection._connection_settings = {}
mongoengine.connection._connections = {}
mongoengine.connection._dbs = {}
def test_connect(self):
"""Ensure that the connect() method works properly.
"""
connect('mongoenginetest')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
connect('mongoenginetest2', alias='testdb')
conn = get_connection('testdb')
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
def test_connect_in_mocking(self):
"""Ensure that the connect() method works properly in mocking.
"""
try:
import mongomock
except ImportError:
raise SkipTest('you need mongomock installed to run this testcase')
connect('mongoenginetest', host='mongomock://localhost')
conn = get_connection()
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect('mongoenginetest2', host='mongomock://localhost', alias='testdb2')
conn = get_connection('testdb2')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect('mongoenginetest3', host='mongodb://localhost', is_mock=True, alias='testdb3')
conn = get_connection('testdb3')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect('mongoenginetest4', is_mock=True, alias='testdb4')
conn = get_connection('testdb4')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect(host='mongodb://localhost:27017/mongoenginetest5', is_mock=True, alias='testdb5')
conn = get_connection('testdb5')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect(host='mongomock://localhost:27017/mongoenginetest6', alias='testdb6')
conn = get_connection('testdb6')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect(host='mongomock://localhost:27017/mongoenginetest7', is_mock=True, alias='testdb7')
conn = get_connection('testdb7')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
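# The cases above show that a mongomock:// scheme, an is_mock=True kwarg,
# or the two combined all yield a mongomock.MongoClient.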
def test_connect_with_host_list(self):
"""Ensure that the connect() method works when host is a list
Uses mongomock to test w/o needing multiple mongod/mongos processes
"""
try:
import mongomock
except ImportError:
raise SkipTest('you need mongomock installed to run this testcase')
connect(host=['mongomock://localhost'])
conn = get_connection()
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect(host=['mongodb://localhost'], is_mock=True, alias='testdb2')
conn = get_connection('testdb2')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect(host=['localhost'], is_mock=True, alias='testdb3')
conn = get_connection('testdb3')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect(host=['mongomock://localhost:27017', 'mongomock://localhost:27018'], alias='testdb4')
conn = get_connection('testdb4')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect(host=['mongodb://localhost:27017', 'mongodb://localhost:27018'], is_mock=True, alias='testdb5')
conn = get_connection('testdb5')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
connect(host=['localhost:27017', 'localhost:27018'], is_mock=True, alias='testdb6')
conn = get_connection('testdb6')
self.assertTrue(isinstance(conn, mongomock.MongoClient))
def test_disconnect(self):
"""Ensure that the disconnect() method works properly
"""
conn1 = connect('mongoenginetest')
mongoengine.connection.disconnect()
conn2 = connect('mongoenginetest')
self.assertTrue(conn1 is not conn2)
def test_sharing_connections(self):
"""Ensure that connections are shared when the connection settings are exactly the same
"""
connect('mongoenginetests', alias='testdb1')
expected_connection = get_connection('testdb1')
connect('mongoenginetests', alias='testdb2')
actual_connection = get_connection('testdb2')
# Handle PyMongo 3+ Async Connection
if IS_PYMONGO_3:
# Ensure we are connected, throws ServerSelectionTimeoutError otherwise.
# Purposely not catching exception to fail test if thrown.
expected_connection.server_info()
self.assertEqual(expected_connection, actual_connection)
def test_connect_uri(self):
"""Ensure that the connect() method works properly with uri's
"""
c = connect(db='mongoenginetest', alias='admin')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
c.admin.add_user("admin", "password")
c.admin.authenticate("admin", "password")
c.mongoenginetest.add_user("username", "password")
if not IS_PYMONGO_3:
self.assertRaises(
MongoEngineConnectionError, connect, 'testdb_uri_bad',
host='mongodb://test:password@localhost'
)
connect("testdb_uri", host='mongodb://username:password@localhost/mongoenginetest')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
c.admin.system.users.remove({})
c.mongoenginetest.system.users.remove({})
def test_connect_uri_without_db(self):
"""Ensure connect() method works properly if the URI doesn't
include a database name.
"""
connect("mongoenginetest", host='mongodb://localhost/')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
def test_connect_uri_default_db(self):
"""Ensure connect() defaults to the right database name if
neither the URI nor the database_name explicitly specifies it.
"""
connect(host='mongodb://localhost/')
conn = get_connection()
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'test')
def test_connect_uri_with_replicaset(self):
"""Ensure connect() works when specifying a replicaSet."""
if IS_PYMONGO_3:
c = connect(host='mongodb://localhost/test?replicaSet=local-rs')
db = get_db()
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'test')
else:
# PyMongo < v3.x raises an exception:
# "localhost:27017 is not a member of replica set local-rs"
with self.assertRaises(MongoEngineConnectionError):
c = connect(host='mongodb://localhost/test?replicaSet=local-rs')
def test_uri_without_credentials_doesnt_override_conn_settings(self):
"""Ensure connect() uses the username & password params if the URI
doesn't explicitly specify them.
"""
c = connect(host='mongodb://localhost/mongoenginetest',
username='user',
password='pass')
# OperationFailure means that mongoengine attempted authentication
# w/ the provided username/password and failed - that's the desired
# behavior. If the MongoDB URI (which carries no credentials) were to
# override them, no authentication would be attempted at all.
self.assertRaises(OperationFailure, get_db)
def test_connect_uri_with_authsource(self):
"""Ensure that the connect() method works well with
the option `authSource` in URI.
This feature was introduced in MongoDB 2.4 and removed in 2.6
"""
# Create users
c = connect('mongoenginetest')
c.admin.system.users.remove({})
c.admin.add_user('username2', 'password')
# Authentication fails without "authSource"
if IS_PYMONGO_3:
test_conn = connect('mongoenginetest', alias='test1',
host='mongodb://username2:password@localhost/mongoenginetest')
self.assertRaises(OperationFailure, test_conn.server_info)
else:
self.assertRaises(
MongoEngineConnectionError, connect, 'mongoenginetest',
alias='test1',
host='mongodb://username2:password@localhost/mongoenginetest'
)
self.assertRaises(MongoEngineConnectionError, get_db, 'test1')
# Authentication succeeds with "authSource"
connect(
'mongoenginetest', alias='test2',
host=('mongodb://username2:password@localhost/'
'mongoenginetest?authSource=admin')
)
# This will fail starting from MongoDB 2.6+
db = get_db('test2')
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest')
# Clear all users
c.admin.system.users.remove({})
def test_register_connection(self):
"""Ensure that connections with different aliases may be registered.
"""
register_connection('testdb', 'mongoenginetest2')
self.assertRaises(MongoEngineConnectionError, get_connection)
conn = get_connection('testdb')
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
db = get_db('testdb')
self.assertTrue(isinstance(db, pymongo.database.Database))
self.assertEqual(db.name, 'mongoenginetest2')
def test_register_connection_defaults(self):
"""Ensure that defaults are used when the host and port are None.
"""
register_connection('testdb', 'mongoenginetest', host=None, port=None)
conn = get_connection('testdb')
self.assertTrue(isinstance(conn, pymongo.mongo_client.MongoClient))
def test_connection_kwargs(self):
"""Ensure that connection kwargs get passed to pymongo.
"""
connect('mongoenginetest', alias='t1', tz_aware=True)
conn = get_connection('t1')
self.assertTrue(get_tz_awareness(conn))
connect('mongoenginetest2', alias='t2')
conn = get_connection('t2')
self.assertFalse(get_tz_awareness(conn))
def test_datetime(self):
connect('mongoenginetest', tz_aware=True)
d = datetime.datetime(2010, 5, 5, tzinfo=utc)
class DateDoc(Document):
the_date = DateTimeField(required=True)
DateDoc.drop_collection()
DateDoc(the_date=d).save()
date_doc = DateDoc.objects.first()
self.assertEqual(d, date_doc.the_date)
def test_multiple_connection_settings(self):
connect('mongoenginetest', alias='t1', host="localhost")
connect('mongoenginetest2', alias='t2', host="127.0.0.1")
mongo_connections = mongoengine.connection._connections
self.assertEqual(len(mongo_connections.items()), 2)
self.assertTrue('t1' in mongo_connections.keys())
self.assertTrue('t2' in mongo_connections.keys())
if not IS_PYMONGO_3:
self.assertEqual(mongo_connections['t1'].host, 'localhost')
self.assertEqual(mongo_connections['t2'].host, '127.0.0.1')
else:
# Handle PyMongo 3+ Async Connection
# Ensure we are connected, throws ServerSelectionTimeoutError otherwise.
# Purposely not catching exception to fail test if thrown.
mongo_connections['t1'].server_info()
mongo_connections['t2'].server_info()
self.assertEqual(mongo_connections['t1'].address[0], 'localhost')
self.assertEqual(mongo_connections['t2'].address[0], '127.0.0.1')
if __name__ == '__main__':
unittest.main()
| |
"""
self-contained to write legacy storage pickle files
To use this script, create an environment where you want to
generate pickles, say it's for 0.20.3, with your pandas clone
in ~/pandas
. activate pandas_0.20.3
cd ~/
$ python pandas/pandas/tests/io/generate_legacy_storage_files.py \
pandas/pandas/tests/io/data/legacy_pickle/0.20.3/ pickle
This script generates a storage file for the current arch, system,
and python version
pandas version: 0.20.3
output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/
storage format: pickle
created pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle
The idea here is you are using the *current* version of the
generate_legacy_storage_files with an *older* version of pandas to
generate a pickle file. We will then check this file into a current
branch, and test using test_pickle.py. This will load the *older*
pickles, regenerate the equivalent data with the current code
(master), and compare the two.
If we have cases where we changed the signature (e.g. we renamed
offset -> freq in Timestamp), then we have to conditionally execute
in generate_legacy_storage_files.py to make it
run under both the older AND the newer version.
"""
from datetime import timedelta
from distutils.version import LooseVersion
import os
import pickle
import platform as pl
import sys
import numpy as np
import pandas
from pandas import (
Categorical,
DataFrame,
Index,
MultiIndex,
NaT,
Period,
RangeIndex,
Series,
Timestamp,
bdate_range,
date_range,
period_range,
timedelta_range,
)
from pandas.tseries.offsets import (
FY5253,
BusinessDay,
BusinessHour,
CustomBusinessDay,
DateOffset,
Day,
Easter,
Hour,
LastWeekOfMonth,
Minute,
MonthBegin,
MonthEnd,
QuarterBegin,
QuarterEnd,
SemiMonthBegin,
SemiMonthEnd,
Week,
WeekOfMonth,
YearBegin,
YearEnd,
)
try:
# TODO: remove try/except when 0.24.0 is the legacy version.
from pandas.arrays import SparseArray
except ImportError:
from pandas.core.sparse.api import SparseArray
_loose_version = LooseVersion(pandas.__version__)
def _create_sp_series():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
bseries = Series(SparseArray(arr, kind="block"))
bseries.name = "bseries"
return bseries
def _create_sp_tsseries():
nan = np.nan
# nan-based
arr = np.arange(15, dtype=np.float64)
arr[7:12] = nan
arr[-1:] = nan
date_index = bdate_range("1/1/2011", periods=len(arr))
bseries = Series(SparseArray(arr, kind="block"), index=date_index)
bseries.name = "btsseries"
return bseries
def _create_sp_frame():
nan = np.nan
data = {
"A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
"B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
"C": np.arange(10).astype(np.int64),
"D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
}
dates = bdate_range("1/1/2011", periods=10)
return DataFrame(data, index=dates).apply(SparseArray)
def create_data():
""" create the pickle data """
data = {
"A": [0.0, 1.0, 2.0, 3.0, np.nan],
"B": [0, 1, 0, 1, 0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": date_range("1/1/2009", periods=5),
"E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
}
scalars = dict(timestamp=Timestamp("20130101"), period=Period("2012", "M"))
index = dict(
int=Index(np.arange(10)),
date=date_range("20130101", periods=10),
period=period_range("2013-01-01", freq="M", periods=10),
float=Index(np.arange(10, dtype=np.float64)),
uint=Index(np.arange(10, dtype=np.uint64)),
timedelta=timedelta_range("00:00:00", freq="30T", periods=10),
)
index["range"] = RangeIndex(10)
if _loose_version >= LooseVersion("0.21"):
from pandas import interval_range
index["interval"] = interval_range(0, periods=10)
mi = dict(
reg2=MultiIndex.from_tuples(
tuple(
zip(
*[
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
)
),
names=["first", "second"],
)
)
series = dict(
float=Series(data["A"]),
int=Series(data["B"]),
mixed=Series(data["E"]),
ts=Series(
np.arange(10).astype(np.int64), index=date_range("20130101", periods=10)
),
mi=Series(
np.arange(5).astype(np.float64),
index=MultiIndex.from_tuples(
tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"]
),
),
dup=Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
cat=Series(Categorical(["foo", "bar", "baz"])),
dt=Series(date_range("20130101", periods=5)),
dt_tz=Series(date_range("20130101", periods=5, tz="US/Eastern")),
period=Series([Period("2000Q1")] * 5),
)
mixed_dup_df = DataFrame(data)
mixed_dup_df.columns = list("ABCDA")
frame = dict(
float=DataFrame({"A": series["float"], "B": series["float"] + 1}),
int=DataFrame({"A": series["int"], "B": series["int"] + 1}),
mixed=DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
mi=DataFrame(
{"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)},
index=MultiIndex.from_tuples(
tuple(
zip(
*[
["bar", "bar", "baz", "baz", "baz"],
["one", "two", "one", "two", "three"],
]
)
),
names=["first", "second"],
),
),
dup=DataFrame(
np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"]
),
cat_onecol=DataFrame({"A": Categorical(["foo", "bar"])}),
cat_and_float=DataFrame(
{
"A": Categorical(["foo", "bar", "baz"]),
"B": np.arange(3).astype(np.int64),
}
),
mixed_dup=mixed_dup_df,
dt_mixed_tzs=DataFrame(
{
"A": Timestamp("20130102", tz="US/Eastern"),
"B": Timestamp("20130603", tz="CET"),
},
index=range(5),
),
dt_mixed2_tzs=DataFrame(
{
"A": Timestamp("20130102", tz="US/Eastern"),
"B": Timestamp("20130603", tz="CET"),
"C": Timestamp("20130603", tz="UTC"),
},
index=range(5),
),
)
cat = dict(
int8=Categorical(list("abcdefg")),
int16=Categorical(np.arange(1000)),
int32=Categorical(np.arange(10000)),
)
timestamp = dict(
normal=Timestamp("2011-01-01"),
nat=NaT,
tz=Timestamp("2011-01-01", tz="US/Eastern"),
)
timestamp["freq"] = Timestamp("2011-01-01", freq="D")
timestamp["both"] = Timestamp("2011-01-01", tz="Asia/Tokyo", freq="M")
off = {
"DateOffset": DateOffset(years=1),
"DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824),
"BusinessDay": BusinessDay(offset=timedelta(seconds=9)),
"BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"),
"CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"),
"SemiMonthBegin": SemiMonthBegin(day_of_month=9),
"SemiMonthEnd": SemiMonthEnd(day_of_month=24),
"MonthBegin": MonthBegin(1),
"MonthEnd": MonthEnd(1),
"QuarterBegin": QuarterBegin(1),
"QuarterEnd": QuarterEnd(1),
"Day": Day(1),
"YearBegin": YearBegin(1),
"YearEnd": YearEnd(1),
"Week": Week(1),
"Week_Tues": Week(2, normalize=False, weekday=1),
"WeekOfMonth": WeekOfMonth(week=3, weekday=4),
"LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3),
"FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
"Easter": Easter(),
"Hour": Hour(1),
"Minute": Minute(1),
}
return dict(
series=series,
frame=frame,
index=index,
scalars=scalars,
mi=mi,
sp_series=dict(float=_create_sp_series(), ts=_create_sp_tsseries()),
sp_frame=dict(float=_create_sp_frame()),
cat=cat,
timestamp=timestamp,
offsets=off,
)
def create_pickle_data():
data = create_data()
return data
def platform_name():
return "_".join(
[
str(pandas.__version__),
str(pl.machine()),
str(pl.system().lower()),
str(pl.python_version()),
]
)
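# platform_name() yields e.g. "0.20.3_x86_64_darwin_3.5.2", matching the
# pickle file name shown in the module docstring.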
def write_legacy_pickles(output_dir):
version = pandas.__version__
print(
"This script generates a storage file for the current arch, system, "
"and python version"
)
print(f" pandas version: {version}")
print(f" output dir : {output_dir}")
print(" storage format: pickle")
pth = f"{platform_name()}.pickle"
with open(os.path.join(output_dir, pth), "wb") as fh:
pickle.dump(create_pickle_data(), fh, pickle.HIGHEST_PROTOCOL)
print(f"created pickle file: {pth}")
def write_legacy_file():
# force our cwd to be the first searched
sys.path.insert(0, ".")
if not (3 <= len(sys.argv) <= 4):
exit(
"Specify output directory and storage type: generate_legacy_"
"storage_files.py <output_dir> <storage_type> "
)
output_dir = str(sys.argv[1])
storage_type = str(sys.argv[2])
if storage_type == "pickle":
write_legacy_pickles(output_dir=output_dir)
else:
exit("storage_type must be one of {'pickle'}")
if __name__ == "__main__":
write_legacy_file()
| |
# Copyright(c) 2007-2009 by Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# This file is part of PyCha.
#
# PyCha is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyCha is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with PyCha. If not, see <http://www.gnu.org/licenses/>.
import copy
import inspect
import math
import cairo
from pycha.color import ColorScheme, hex2rgb, DEFAULT_COLOR
class Chart(object):
def __init__(self, surface, options={}):
# this flag is useful to reuse this chart for drawing different data
# or use different options
self.resetFlag = False
# initialize storage
self.datasets = []
# computed values used in several methods
self.area = None # chart area without padding or text labels
self.minxval = None
self.maxxval = None
self.minyval = None
self.maxyval = None
self.xscale = 1.0
self.yscale = 1.0
self.xrange = None
self.yrange = None
self.xticks = []
self.yticks = []
# set the default options
self.options = copy.deepcopy(DEFAULT_OPTIONS)
if options:
self.options.merge(options)
# initialize the surface
self._initSurface(surface)
self.colorScheme = None
def addDataset(self, dataset):
"""Adds an object containing chart data to the storage hash"""
self.datasets += dataset
def _getDatasetsKeys(self):
"""Return the name of each data set"""
return [d[0] for d in self.datasets]
def _getDatasetsValues(self):
"""Return the data (value) of each data set"""
return [d[1] for d in self.datasets]
def setOptions(self, options={}):
"""Sets options of this chart"""
self.options.merge(options)
def getSurfaceSize(self):
cx = cairo.Context(self.surface)
x, y, w, h = cx.clip_extents()
return w, h
def reset(self):
"""Resets options and datasets.
In the next render the surface will be cleaned before any drawing.
"""
self.resetFlag = True
self.options = copy.deepcopy(DEFAULT_OPTIONS)
self.datasets = []
def render(self, surface=None, options={}):
"""Renders the chart with the specified options.
The optional parameters can be used to render a chart in a different
surface with new options.
"""
self._update(options)
if surface:
self._initSurface(surface)
cx = cairo.Context(self.surface)
self._renderBackground(cx)
self._renderChart(cx)
self._renderAxis(cx)
self._renderTitle(cx)
self._renderLegend(cx)
def clean(self):
"""Clears the surface with a white background."""
cx = cairo.Context(self.surface)
cx.save()
cx.set_source_rgb(1, 1, 1)
cx.paint()
cx.restore()
def _setColorscheme(self):
"""Sets the colorScheme used for the chart using the
options.colorScheme option
"""
name = self.options.colorScheme.name
keys = self._getDatasetsKeys()
colorSchemeClass = ColorScheme.getColorScheme(name, None)
if colorSchemeClass is None:
raise ValueError('Color scheme "%s" is invalid!' % name)
# Remove invalid args before calling the constructor
kwargs = dict(self.options.colorScheme.args)
validArgs = inspect.getargspec(colorSchemeClass.__init__)[0]
kwargs = dict([(k, v) for k, v in kwargs.items() if k in validArgs])
self.colorScheme = colorSchemeClass(keys, **kwargs)
def _initSurface(self, surface):
self.surface = surface
if self.resetFlag:
self.resetFlag = False
self.clean()
def _update(self, options={}):
"""Update all the information needed to render the chart"""
self.setOptions(options)
self._setColorscheme()
self._updateXY()
self._updateChart()
self._updateTicks()
def _updateXY(self):
"""Calculates all kinds of metrics for the x and y axis"""
x_range_is_defined = self.options.axis.x.range is not None
y_range_is_defined = self.options.axis.y.range is not None
if not x_range_is_defined or not y_range_is_defined:
stores = self._getDatasetsValues()
# gather data for the x axis
if x_range_is_defined:
self.minxval, self.maxxval = self.options.axis.x.range
else:
xdata = [pair[0] for pair in reduce(lambda a, b: a + b, stores)]
self.minxval = float(min(xdata))
self.maxxval = float(max(xdata))
if self.minxval * self.maxxval > 0 and self.minxval > 0:
self.minxval = 0.0
self.xrange = self.maxxval - self.minxval
if self.xrange == 0:
self.xscale = 1.0
else:
self.xscale = 1.0 / self.xrange
# gather data for the y axis
if y_range_is_defined:
self.minyval, self.maxyval = self.options.axis.y.range
else:
ydata = [pair[1] for pair in reduce(lambda a, b: a + b, stores)]
self.minyval = float(min(ydata))
self.maxyval = float(max(ydata))
if self.minyval * self.maxyval > 0 and self.minyval > 0:
self.minyval = 0.0
self.yrange = self.maxyval - self.minyval
if self.yrange == 0:
self.yscale = 1.0
else:
self.yscale = 1.0 / self.yrange
# calculate area data
surface_width, surface_height = self.getSurfaceSize()
width = (surface_width
- self.options.padding.left - self.options.padding.right)
height = (surface_height
- self.options.padding.top - self.options.padding.bottom)
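# The origin is the fraction of the chart height that lies below y == 0;
# it is nonzero only when the y data straddles zero (different signs).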
if self.minyval * self.maxyval < 0: # different signs
origin = abs(self.minyval) * self.yscale
else:
origin = 0
self.area = Area(self.options.padding.left,
self.options.padding.top,
width, height, origin)
def _updateChart(self):
raise NotImplementedError
def _updateTicks(self):
"""Evaluates ticks for x and y axis.
You should call _updateXY before because that method computes the
values of xscale, minxval, yscale, and other attributes needed for
this method.
"""
stores = self._getDatasetsValues()
# evaluate xTicks
self.xticks = []
if self.options.axis.x.ticks:
for tick in self.options.axis.x.ticks:
if not isinstance(tick, Option):
tick = Option(tick)
if tick.label is None:
label = str(tick.v)
else:
label = tick.label
pos = self.xscale * (tick.v - self.minxval)
if 0.0 <= pos <= 1.0:
self.xticks.append((pos, label))
elif self.options.axis.x.interval > 0:
interval = self.options.axis.x.interval
label = (divmod(self.minxval, interval)[0] + 1) * interval
pos = self.xscale * (label - self.minxval)
while 0.0 <= pos <= 1.0:
self.xticks.append((pos, label))
label += interval
pos = self.xscale * (label - self.minxval)
elif self.options.axis.x.tickCount > 0:
uniqx = range(len(uniqueIndices(stores)) + 1)
roughSeparation = self.xrange / self.options.axis.x.tickCount
i = j = 0
while i < len(uniqx) and j < self.options.axis.x.tickCount:
if (uniqx[i] - self.minxval) >= (j * roughSeparation):
pos = self.xscale * (uniqx[i] - self.minxval)
if 0.0 <= pos <= 1.0:
self.xticks.append((pos, uniqx[i]))
j += 1
i += 1
# evaluate yTicks
self.yticks = []
if self.options.axis.y.ticks:
for tick in self.options.axis.y.ticks:
if not isinstance(tick, Option):
tick = Option(tick)
if tick.label is None:
label = str(tick.v)
else:
label = tick.label
pos = 1.0 - (self.yscale * (tick.v - self.minyval))
if 0.0 <= pos <= 1.0:
self.yticks.append((pos, label))
elif self.options.axis.y.interval > 0:
interval = self.options.axis.y.interval
label = (divmod(self.minyval, interval)[0] + 1) * interval
pos = 1.0 - (self.yscale * (label - self.minyval))
while 0.0 <= pos <= 1.0:
self.yticks.append((pos, label))
label += interval
pos = 1.0 - (self.yscale * (label - self.minyval))
elif self.options.axis.y.tickCount > 0:
prec = self.options.axis.y.tickPrecision
num = self.yrange / self.options.axis.y.tickCount
if (num < 1 and prec == 0):
roughSeparation = 1
else:
roughSeparation = round(num, prec)
for i in range(self.options.axis.y.tickCount + 1):
yval = self.minyval + (i * roughSeparation)
pos = 1.0 - ((yval - self.minyval) * self.yscale)
if 0.0 <= pos <= 1.0:
self.yticks.append((pos, round(yval, prec)))
def _renderBackground(self, cx):
"""Renders the background area of the chart"""
if self.options.background.hide:
return
cx.save()
if self.options.background.baseColor:
cx.set_source_rgb(*hex2rgb(self.options.background.baseColor))
cx.paint()
if self.options.background.chartColor:
cx.set_source_rgb(*hex2rgb(self.options.background.chartColor))
cx.rectangle(self.area.x, self.area.y, self.area.w, self.area.h)
cx.fill()
if self.options.background.lineColor:
cx.set_source_rgb(*hex2rgb(self.options.background.lineColor))
cx.set_line_width(self.options.axis.lineWidth)
self._renderLines(cx)
cx.restore()
def _renderLines(self, cx):
"""Aux function for _renderBackground"""
ticks = self.yticks
for tick in ticks:
self._renderLine(cx, tick, False)
def _renderLine(self, cx, tick, horiz):
"""Aux function for _renderLines"""
x1, x2, y1, y2 = (0, 0, 0, 0)
if horiz:
x1 = x2 = tick[0] * self.area.w + self.area.x
y1 = self.area.y
y2 = y1 + self.area.h
else:
x1 = self.area.x
x2 = x1 + self.area.w
y1 = y2 = tick[0] * self.area.h + self.area.y
cx.new_path()
cx.move_to(x1, y1)
cx.line_to(x2, y2)
cx.close_path()
cx.stroke()
def _renderChart(self, cx):
raise NotImplementedError
def _renderYTick(self, cx, tick):
"""Aux method for _renderAxis"""
if callable(tick):
return
x = self.area.x
y = self.area.y + tick[0] * self.area.h
cx.new_path()
cx.move_to(x, y)
cx.line_to(x - self.options.axis.tickSize, y)
cx.close_path()
cx.stroke()
cx.select_font_face(self.options.axis.tickFont,
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_NORMAL)
cx.set_font_size(self.options.axis.tickFontSize)
label = unicode(tick[1])
extents = cx.text_extents(label)
labelWidth = extents[2]
labelHeight = extents[3]
if self.options.axis.y.rotate:
radians = math.radians(self.options.axis.y.rotate)
cx.move_to(x - self.options.axis.tickSize
- (labelWidth * math.cos(radians))
- 4,
y + (labelWidth * math.sin(radians))
+ labelHeight / (2.0 / math.cos(radians)))
cx.rotate(-radians)
cx.show_text(label)
cx.rotate(radians) # this is probably faster than a save/restore
else:
cx.move_to(x - self.options.axis.tickSize - labelWidth - 4,
y + labelHeight / 2.0)
cx.show_text(label)
return label
def _renderXTick(self, cx, tick, fontAscent):
if callable(tick):
return
x = self.area.x + tick[0] * self.area.w
y = self.area.y + self.area.h
cx.new_path()
cx.move_to(x, y)
cx.line_to(x, y + self.options.axis.tickSize)
cx.close_path()
cx.stroke()
cx.select_font_face(self.options.axis.tickFont,
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_NORMAL)
cx.set_font_size(self.options.axis.tickFontSize)
label = unicode(tick[1])
extents = cx.text_extents(label)
labelWidth = extents[2]
labelHeight = extents[3]
if self.options.axis.x.rotate:
radians = math.radians(self.options.axis.x.rotate)
cx.move_to(x - (labelHeight * math.cos(radians)),
y + self.options.axis.tickSize
+ (labelHeight * math.cos(radians))
+ 4.0)
cx.rotate(radians)
cx.show_text(label)
cx.rotate(-radians)
else:
cx.move_to(x - labelWidth / 2.0,
y + self.options.axis.tickSize
+ fontAscent + 4.0)
cx.show_text(label)
return label
def _getTickSize(self, cx, ticks, rotate):
tickExtents = [cx.text_extents(unicode(tick[1]))[2:4]
for tick in ticks]
tickWidth = tickHeight = 0.0
if tickExtents:
tickHeight = self.options.axis.tickSize + 4.0
tickWidth = self.options.axis.tickSize + 4.0
widths, heights = zip(*tickExtents)
maxWidth, maxHeight = max(widths), max(heights)
if rotate:
radians = math.radians(rotate)
sinRadians = math.sin(radians)
cosRadians = math.cos(radians)
maxHeight = maxWidth * sinRadians + maxHeight * cosRadians
maxWidth = maxWidth * cosRadians + maxHeight * sinRadians
tickWidth += maxWidth
tickHeight += maxHeight
return tickWidth, tickHeight
def _renderAxisLabel(self, cx, tickWidth, tickHeight, label, x, y,
vertical=False):
cx.new_path()
cx.select_font_face(self.options.axis.labelFont,
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
cx.set_font_size(self.options.axis.labelFontSize)
labelWidth = cx.text_extents(label)[2]
fontAscent = cx.font_extents()[0]
if vertical:
cx.move_to(x, y + labelWidth / 2)
radians = math.radians(90)
cx.rotate(-radians)
else:
cx.move_to(x - labelWidth / 2.0, y + fontAscent)
cx.show_text(label)
def _renderYAxis(self, cx):
"""Draws the vertical line represeting the Y axis"""
cx.new_path()
cx.move_to(self.area.x, self.area.y)
cx.line_to(self.area.x, self.area.y + self.area.h)
cx.close_path()
cx.stroke()
def _renderXAxis(self, cx):
"""Draws the horizontal line representing the X axis"""
cx.new_path()
cx.move_to(self.area.x,
self.area.y + self.area.h * (1.0 - self.area.origin))
cx.line_to(self.area.x + self.area.w,
self.area.y + self.area.h * (1.0 - self.area.origin))
cx.close_path()
cx.stroke()
def _renderAxis(self, cx):
"""Renders axis"""
if self.options.axis.x.hide and self.options.axis.y.hide:
return
cx.save()
cx.set_source_rgb(*hex2rgb(self.options.axis.lineColor))
cx.set_line_width(self.options.axis.lineWidth)
if not self.options.axis.y.hide:
if self.yticks:
for tick in self.yticks:
self._renderYTick(cx, tick)
if self.options.axis.y.label:
cx.save()
rotate = self.options.axis.y.rotate
tickWidth, tickHeight = self._getTickSize(cx, self.yticks,
rotate)
label = unicode(self.options.axis.y.label)
x = self.area.x - tickWidth - 4.0
y = self.area.y + 0.5 * self.area.h
self._renderAxisLabel(cx, tickWidth, tickHeight, label, x, y,
True)
cx.restore()
self._renderYAxis(cx)
if not self.options.axis.x.hide:
fontAscent = cx.font_extents()[0]
if self.xticks:
for tick in self.xticks:
self._renderXTick(cx, tick, fontAscent)
if self.options.axis.x.label:
cx.save()
rotate = self.options.axis.x.rotate
tickWidth, tickHeight = self._getTickSize(cx, self.xticks,
rotate)
label = unicode(self.options.axis.x.label)
x = self.area.x + self.area.w / 2.0
y = self.area.y + self.area.h + tickHeight + 4.0
self._renderAxisLabel(cx, tickWidth, tickHeight, label, x, y,
False)
cx.restore()
self._renderXAxis(cx)
cx.restore()
def _renderTitle(self, cx):
if self.options.title:
cx.save()
cx.select_font_face(self.options.titleFont,
cairo.FONT_SLANT_NORMAL,
cairo.FONT_WEIGHT_BOLD)
cx.set_font_size(self.options.titleFontSize)
title = unicode(self.options.title)
extents = cx.text_extents(title)
titleWidth = extents[2]
x = self.area.x + self.area.w / 2.0 - titleWidth / 2.0
y = cx.font_extents()[0] # font ascent
cx.move_to(x, y)
cx.show_text(title)
cx.restore()
def _renderLegend(self, cx):
"""This function adds a legend to the chart"""
if self.options.legend.hide:
return
surface_width, surface_height = self.getSurfaceSize()
# Compute legend dimensions
padding = 4
bullet = 15
width = 0
height = padding
keys = self._getDatasetsKeys()
for key in keys:
extents = cx.text_extents(key)
width = max(extents[2], width)
height += max(extents[3], bullet) + padding
width = padding + bullet + padding + width + padding
# Compute legend position
legend = self.options.legend
if legend.position.right is not None:
legend.position.left = (surface_width
- legend.position.right
- width)
if legend.position.bottom is not None:
legend.position.top = (surface_height
- legend.position.bottom
- height)
# Draw the legend
cx.save()
cx.rectangle(self.options.legend.position.left,
self.options.legend.position.top,
width, height)
cx.set_source_rgba(1, 1, 1, self.options.legend.opacity)
cx.fill_preserve()
cx.set_line_width(self.options.stroke.width)
cx.set_source_rgb(*hex2rgb(self.options.legend.borderColor))
cx.stroke()
def drawKey(key, x, y, text_height):
cx.rectangle(x, y, bullet, bullet)
cx.set_source_rgb(*self.colorScheme[key])
cx.fill_preserve()
cx.set_source_rgb(0, 0, 0)
cx.stroke()
cx.move_to(x + bullet + padding,
y + bullet / 2.0 + text_height / 2.0)
cx.show_text(key)
cx.set_line_width(1)
x = self.options.legend.position.left + padding
y = self.options.legend.position.top + padding
for key in keys:
extents = cx.text_extents(key)
drawKey(key, x, y, extents[3])
y += max(extents[3], bullet) + padding
cx.restore()
def uniqueIndices(arr):
"""Return a list with the indexes of the biggest element of arr"""
return range(max([len(a) for a in arr]))
class Area(object):
"""Simple rectangle to hold an area coordinates and dimensions"""
def __init__(self, x, y, w, h, origin=0.0):
self.x, self.y, self.w, self.h = x, y, w, h
self.origin = origin
def __str__(self):
msg = "<pycha.chart.Area@(%.2f, %.2f) %.2f x %.2f Origin: %.2f>"
return msg % (self.x, self.y, self.w, self.h, self.origin)
class Option(dict):
"""Useful dict that allow attribute-like access to its keys"""
def __getattr__(self, name):
if name in self.keys():
return self[name]
else:
raise AttributeError(name)
def merge(self, other):
"""Recursive merge with other Option or dict object"""
for key, value in other.items():
if key in self:
if isinstance(self[key], Option):
self[key].merge(other[key])
else:
self[key] = other[key]
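# Illustrative doctest-style example of attribute access and recursive
# merge (note that keys absent from self are silently ignored):
#
#   >>> opts = Option(axis=Option(lineWidth=1.0))
#   >>> opts.merge({'axis': {'lineWidth': 2.0}, 'unknown': 1})
#   >>> opts.axis.lineWidth
#   2.0
#   >>> 'unknown' in opts
#   False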
DEFAULT_OPTIONS = Option(
axis=Option(
lineWidth=1.0,
lineColor='#0f0000',
tickSize=3.0,
labelColor='#666666',
labelFont='Tahoma',
labelFontSize=9,
labelWidth=50.0,
tickFont='Tahoma',
tickFontSize=9,
x=Option(
hide=False,
ticks=None,
tickCount=10,
tickPrecision=1,
range=None,
rotate=None,
label=None,
interval=0,
),
y=Option(
hide=False,
ticks=None,
tickCount=10,
tickPrecision=1,
range=None,
rotate=None,
label=None,
interval=0,
),
),
background=Option(
hide=False,
baseColor=None,
chartColor='#f5f5f5',
lineColor='#ffffff',
lineWidth=1.5,
),
legend=Option(
opacity=0.8,
borderColor='#000000',
hide=False,
position=Option(top=20, left=40, bottom=None, right=None),
),
padding=Option(
left=30,
right=30,
top=30,
bottom=30,
),
stroke=Option(
color='#ffffff',
hide=False,
shadow=True,
width=2
),
yvals=Option(
show=False,
inside=False,
fontSize=11,
fontColor='#000000',
),
fillOpacity=1.0,
shouldFill=True,
barWidthFillFraction=0.75,
pieRadius=0.4,
colorScheme=Option(
name='gradient',
args=Option(
initialColor=DEFAULT_COLOR,
colors=None,
),
),
title=None,
titleFont='Tahoma',
titleFontSize=12,
)
| |
#!/usr/bin/env python3
# Copyright (c) 2020-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test addr relay
"""
import random
import time
from test_framework.messages import (
CAddress,
msg_addr,
msg_getaddr,
msg_verack,
)
from test_framework.p2p import (
P2PInterface,
p2p_lock,
P2P_SERVICES,
)
from test_framework.test_framework import SyscoinTestFramework
from test_framework.util import assert_equal, assert_greater_than
class AddrReceiver(P2PInterface):
num_ipv4_received = 0
test_addr_contents = False
_tokens = 1
send_getaddr = True
def __init__(self, test_addr_contents=False, send_getaddr=True):
super().__init__()
self.test_addr_contents = test_addr_contents
self.send_getaddr = send_getaddr
def on_addr(self, message):
for addr in message.addrs:
self.num_ipv4_received += 1
if self.test_addr_contents:
# relay_tests checks the content of the addr messages match
# expectations based on the message creation in setup_addr_msg
assert_equal(addr.nServices, 9)
if not 8333 <= addr.port < 8343:
raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port))
assert addr.ip.startswith('123.123.123.')
def on_getaddr(self, message):
# When the node sends us a getaddr, it increments the addr relay tokens for the connection by 1000
self._tokens += 1000
@property
def tokens(self):
with p2p_lock:
return self._tokens
def increment_tokens(self, n):
# When we move mocktime forward, the node increments the addr relay tokens for its peers
with p2p_lock:
self._tokens += n
def addr_received(self):
return self.num_ipv4_received != 0
def on_version(self, message):
self.send_message(msg_verack())
if self.send_getaddr:
self.send_message(msg_getaddr())
def getaddr_received(self):
return self.message_count['getaddr'] > 0
class AddrTest(SyscoinTestFramework):
counter = 0
mock_time = int(time.time())
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [["-whitelist=addr@127.0.0.1"]]
def run_test(self):
self.oversized_addr_test()
self.relay_tests()
self.inbound_blackhole_tests()
# This test populates the addrman, which can impact the node's behavior
# in subsequent tests
self.getaddr_tests()
self.blocksonly_mode_tests()
self.rate_limit_tests()
def setup_addr_msg(self, num):
addrs = []
for i in range(num):
addr = CAddress()
addr.time = self.mock_time + i
addr.nServices = P2P_SERVICES
addr.ip = f"123.123.123.{self.counter % 256}"
addr.port = 8333 + i
addrs.append(addr)
self.counter += 1
msg = msg_addr()
msg.addrs = addrs
return msg
def setup_rand_addr_msg(self, num):
addrs = []
for i in range(num):
addr = CAddress()
# SYSCOIN
addr.time = self.mock_time + i
addr.nServices = P2P_SERVICES
addr.ip = f"{random.randrange(128,169)}.{random.randrange(1,255)}.{random.randrange(1,255)}.{random.randrange(1,255)}"
addr.port = 8333
addrs.append(addr)
msg = msg_addr()
msg.addrs = addrs
return msg
def send_addr_msg(self, source, msg, receivers):
source.send_and_ping(msg)
# SYSCOIN pop m_next_addr_send timer
self.mock_time += 10 * 60
self.nodes[0].setmocktime(self.mock_time)
for peer in receivers:
peer.sync_send_with_ping()
def oversized_addr_test(self):
self.log.info('Send an addr message that is too large')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(1010)
with self.nodes[0].assert_debug_log(['addr message size = 1010']):
addr_source.send_and_ping(msg)
self.nodes[0].disconnect_p2ps()
def relay_tests(self):
self.log.info('Test address relay')
self.log.info('Check that addr message content is relayed and added to addrman')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
num_receivers = 7
receivers = []
for _ in range(num_receivers):
receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)))
# Keep this with length <= 10. Addresses from larger messages are not
# relayed.
num_ipv4_addrs = 10
msg = self.setup_addr_msg(num_ipv4_addrs)
with self.nodes[0].assert_debug_log(
[
'received: addr (301 bytes) peer=1',
]
):
self.send_addr_msg(addr_source, msg, receivers)
total_ipv4_received = sum(r.num_ipv4_received for r in receivers)
# Every IPv4 address must be relayed to two peers, other than the
# originating node (addr_source).
ipv4_branching_factor = 2
assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor)
self.nodes[0].disconnect_p2ps()
self.log.info('Check relay of addresses received from outbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True, send_getaddr=False))
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
msg = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
self.log.info('Check that the first addr message received from an outbound peer is not relayed')
# Currently, there is a flag that prevents the first addr message received
# from a new outbound peer from being relayed to others. Originally meant to prevent
# large GETADDR responses from being relayed, it now typically affects the self-announcement
# of the outbound peer, which is often sent before the GETADDR response.
assert_equal(inbound_peer.num_ipv4_received, 0)
# Send an empty ADDR message to initialize address relay on this connection.
inbound_peer.send_and_ping(msg_addr())
self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed')
msg2 = self.setup_addr_msg(2)
self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
assert_equal(inbound_peer.num_ipv4_received, 2)
self.log.info('Check address relay to outbound peers')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
msg3 = self.setup_addr_msg(2)
self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer])
self.log.info('Check that addresses are relayed to full outbound peers')
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.log.info('Check that addresses are not relayed to block-relay-only outbound peers')
assert_equal(block_relay_peer.num_ipv4_received, 0)
self.nodes[0].disconnect_p2ps()
def sum_addr_messages(self, msgs_dict):
return sum(bytes_received for (msg, bytes_received) in msgs_dict.items() if msg in ['addr', 'addrv2', 'getaddr'])
def inbound_blackhole_tests(self):
self.log.info('Check that we only relay addresses to inbound peers who have previously sent us addr related messages')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
receiver_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
blackhole_peer = self.nodes[0].add_p2p_connection(AddrReceiver(send_getaddr=False))
initial_addrs_received = receiver_peer.num_ipv4_received
peerinfo = self.nodes[0].getpeerinfo()
assert_equal(peerinfo[0]['addr_relay_enabled'], True) # addr_source
assert_equal(peerinfo[1]['addr_relay_enabled'], True) # receiver_peer
assert_equal(peerinfo[2]['addr_relay_enabled'], False) # blackhole_peer
# addr_source sends 2 addresses to node0
msg = self.setup_addr_msg(2)
addr_source.send_and_ping(msg)
# SYSCOIN
self.mock_time += 30 * 60
self.nodes[0].setmocktime(self.mock_time)
receiver_peer.sync_with_ping()
blackhole_peer.sync_with_ping()
peerinfo = self.nodes[0].getpeerinfo()
# Confirm node received addr-related messages from receiver peer
assert_greater_than(self.sum_addr_messages(peerinfo[1]['bytesrecv_per_msg']), 0)
# And that peer received addresses
assert_equal(receiver_peer.num_ipv4_received - initial_addrs_received, 2)
# Confirm node has not received addr-related messages from blackhole peer
assert_equal(self.sum_addr_messages(peerinfo[2]['bytesrecv_per_msg']), 0)
# And that peer did not receive addresses
assert_equal(blackhole_peer.num_ipv4_received, 0)
self.log.info("After blackhole peer sends addr message, it becomes eligible for addr gossip")
blackhole_peer.send_and_ping(msg_addr())
# Confirm node has now received addr-related messages from blackhole peer
assert_greater_than(self.sum_addr_messages(self.nodes[0].getpeerinfo()[2]['bytesrecv_per_msg']), 0)
assert_equal(self.nodes[0].getpeerinfo()[2]['addr_relay_enabled'], True)
msg = self.setup_addr_msg(2)
self.send_addr_msg(addr_source, msg, [receiver_peer, blackhole_peer])
# And that peer received addresses
assert_equal(blackhole_peer.num_ipv4_received, 2)
self.nodes[0].disconnect_p2ps()
def getaddr_tests(self):
# In the previous tests, the node answered GETADDR requests with an
# empty addrman. Due to GETADDR response caching (see
# CConnman::GetAddresses), the node would continue to provide 0 addrs
# in response until enough time has passed or the node is restarted.
self.restart_node(0)
self.log.info('Test getaddr behavior')
self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we do not send a getaddr message upon connecting to a block-relay-only peer')
block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
block_relay_peer.sync_with_ping()
assert_equal(block_relay_peer.getaddr_received(), False)
self.log.info('Check that we answer getaddr messages only from inbound peers')
inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(send_getaddr=False))
inbound_peer.sync_with_ping()
# Add some addresses to addrman
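# The loop below derives 1000 distinct IPv4 addresses of the form a.b.1.1
# (a = i >> 8, b = i % 256), giving addrman enough entries to answer the
# getaddr from the inbound peer with a large response.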
for i in range(1000):
first_octet = i >> 8
second_octet = i % 256
a = f"{first_octet}.{second_octet}.1.1"
self.nodes[0].addpeeraddress(a, 8333)
full_outbound_peer.send_and_ping(msg_getaddr())
block_relay_peer.send_and_ping(msg_getaddr())
inbound_peer.send_and_ping(msg_getaddr())
self.mock_time += 5 * 60
self.nodes[0].setmocktime(self.mock_time)
inbound_peer.wait_until(lambda: inbound_peer.addr_received() is True)
assert_equal(full_outbound_peer.num_ipv4_received, 0)
assert_equal(block_relay_peer.num_ipv4_received, 0)
assert inbound_peer.num_ipv4_received > 100
self.nodes[0].disconnect_p2ps()
def blocksonly_mode_tests(self):
self.log.info('Test addr relay in -blocksonly mode')
self.restart_node(0, ["-blocksonly", "-whitelist=addr@127.0.0.1"])
# SYSCOIN
self.mock_time = int(time.time())
self.log.info('Check that we send getaddr messages')
full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
full_outbound_peer.sync_with_ping()
assert full_outbound_peer.getaddr_received()
self.log.info('Check that we relay address messages')
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = self.setup_addr_msg(2)
addr_source.send_and_ping(msg)
self.mock_time += 5 * 60
self.nodes[0].setmocktime(self.mock_time)
full_outbound_peer.sync_with_ping()
self.send_addr_msg(addr_source, msg, [full_outbound_peer])
assert_equal(full_outbound_peer.num_ipv4_received, 2)
self.nodes[0].disconnect_p2ps()
def send_addrs_and_test_rate_limiting(self, peer, no_relay, *, new_addrs, total_addrs):
"""Send an addr message and check that the number of addresses processed and rate-limited is as expected"""
peer.send_and_ping(self.setup_rand_addr_msg(new_addrs))
peerinfo = self.nodes[0].getpeerinfo()[0]
addrs_processed = peerinfo['addr_processed']
addrs_rate_limited = peerinfo['addr_rate_limited']
self.log.debug(f"addrs_processed = {addrs_processed}, addrs_rate_limited = {addrs_rate_limited}")
if no_relay:
assert_equal(addrs_processed, 0)
assert_equal(addrs_rate_limited, 0)
else:
assert_equal(addrs_processed, min(total_addrs, peer.tokens))
assert_equal(addrs_rate_limited, max(0, total_addrs - peer.tokens))
def rate_limit_tests(self):
self.mock_time = int(time.time())
self.restart_node(0, [])
self.nodes[0].setmocktime(self.mock_time)
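# Address processing is rate-limited with a per-peer token bucket. From the
# time jumps below (100 s -> 10 more addresses, 1000 s -> 100 more), tokens
# replenish at roughly 0.1 per second; the initial burst allowance and the
# getaddr bonus for the full-relay peer are assumptions inferred from the
# expected counts in the comments.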
for conn_type, no_relay in [("outbound-full-relay", False), ("block-relay-only", True), ("inbound", False)]:
self.log.info(f'Test rate limiting of addr processing for {conn_type} peers')
if conn_type == "inbound":
peer = self.nodes[0].add_p2p_connection(AddrReceiver())
else:
peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type=conn_type)
# Send 600 addresses. For all but the block-relay-only peer this should result in addresses being processed.
self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=600, total_addrs=600)
# Send 600 more addresses. For the outbound-full-relay peer (to which we send
# a GETADDR, and which will therefore process up to 1001 incoming addresses),
# this means more addresses will be processed.
self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=600, total_addrs=1200)
# Send 10 more. As we reached the processing limit for all nodes, no more addresses should be processed.
self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=10, total_addrs=1210)
# Advance the time by 100 seconds, permitting the processing of 10 more addresses.
# Send 200 and verify that 10 are processed.
self.mock_time += 100
self.nodes[0].setmocktime(self.mock_time)
peer.increment_tokens(10)
self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=200, total_addrs=1410)
# Advance the time by 1000 seconds, permitting the processing of 100 more addresses.
# Send 200 and verify that 100 are processed.
self.mock_time += 1000
self.nodes[0].setmocktime(self.mock_time)
peer.increment_tokens(100)
self.send_addrs_and_test_rate_limiting(peer, no_relay, new_addrs=200, total_addrs=1610)
self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
AddrTest().main()
| |
'''Views for pubman.
These are:
media_object_detail: view media objects
article_list_view: called by other views that process article lists
index: suitable frontpage of website view that lists latest published
articles with frontpage set to True, ordered by publication date.
tag_view: lists articles by tag. Can accept complex tag expressions in
the url query.
article_detail: the main view for articles. Processes stories and
translations for article too.
article_detail_by_id: redirects to article_detail (which takes a slug)
story_detail: story view
story_detail_by_id: redirects to generic detail (which takes a slug)
edit_profile: user profile create and edit view
search: Implements a search of the website using Haystack.
Tested with Whoosh.
markdownpreview: used to preview markdown textareas
'''
import re
from copy import copy
import datetime
from django.shortcuts import render_to_response, get_object_or_404, redirect
from django.http import Http404, HttpResponseRedirect
from django.core.paginator import Paginator, InvalidPage, EmptyPage
#from django.db.models import Q
from django.db.models import get_model
from django.template import RequestContext
from django.views.generic import list_detail
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.cache import cache_page
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext as _
from django.contrib import messages
from tagging.models import TaggedItem
from siteconfig.utils import get_setting
from pubman import settings
from pubman.models import Article, MediaObject, Story
from pubman.models import Translation
from pubman.forms import UserProfileForm, UserForm
from pubman.utils import set_available_languages
def media_object_detail(request, slug):
'''View a media object.
'''
media_obj = get_object_or_404(MediaObject, slug=slug)
if not media_obj.permission_to_view_item(request.user):
raise Http404
return list_detail.object_detail(
request,
queryset = MediaObject.objects.all(),
slug = slug,
extra_context = {'published' : media_obj.is_published()},
)
def article_list_view(request, article_list,
html_template,
published_only=True,
paginate_by=get_setting('ARTICLES_PER_PAGE', settings.ARTICLES_PER_PAGE),
story=None,
additional_context=None):
'''View a filtered list of articles. This is called by other
views listing articles including index and tag_view.
Parameters:
request
html_template: template to use to render this view.
published_only: if true, only published articles will be listed
(defaults to True)
paginate_by: number of articles per page
(defaults to PUBMAN_ARTICLES_PER_PAGE setting)
story: story that must be used with this article - set to none
if there is no story (default=None)
additional_context: dictionary of additional data for template
'''
# Avoid a shared mutable default argument.
if additional_context is None:
additional_context = {}
article_list = article_list.\
filter(Article.permission_to_view_Q(request.user, published_only)).\
order_by('-sticky', '-date_published')
paginator = Paginator(article_list, paginate_by)
# Make sure page request is an int. If not, deliver first page.
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request (9999) is out of range, deliver last page of results.
try:
articles_for_page = paginator.page(page)
except (EmptyPage, InvalidPage):
articles_for_page = paginator.page(paginator.num_pages)
for article in articles_for_page.object_list:
article.blurb = article.blurb.partition(settings.TEXT_SHORTEN_CODE)[0]
from_page = max(1, page-5)
to_page = min(paginator.num_pages, page+5) + 1
page_range = [i for i in range(from_page, to_page)]
additional_context.update( {
'page_obj': articles_for_page,
'page_range' : page_range,
'story' : story,
'page' : page
})
return render_to_response(html_template,
additional_context,
context_instance=RequestContext(request))
def index(request):
'''Front page view.'''
article_list = Article.objects.filter(frontpage=True)
return article_list_view(request, article_list,
'pubman/index.html')
def tag_view(request,
tag_expression,
app_name = 'pubman',
model_name='article',
view=article_list_view,
html_template='pubman/tag.html'):
"""This snippet is a view for complex tag queries that generates a list of model instances matching the set expression generated by the tag_expression argument which in my application is from the querystring. It uses the django-tagging app.
E.g. http://example.com/people/?(deceased&parrot)|"Monty Python"
will retrieve all people who are either deceased parrots or members of Monty Python.
In the tag_expression argument:
* ALL is treated as a keyword. If you happen to have a tag called ALL and want to retrieve all objects with it, surround it in quotation marks, e.g. "ALL".
* Examples:
- famous - returns all instances of the model tagged with famous.
- famous&deceased - returns all instances of the model tagged both famous and deceased.
- famous|deceased - returns all instances of the model tagged famous or deceased.
- parrot^deceased - returns all parrots in the model that are not deceased (^ denotes set difference).
- ALL^deceased - returns all instances of the model that are not tagged deceased.
- "great author"&deceased - returns all instances tagged "great author" and deceased.
Note: This view currently assumes that a tag is composed of letters, digits, underscores and spaces.
Arguments:
* request -- HTTP Request object
* tag_expression -- a set expression of tags, supporting intersection, union, parenthesis and difference
* app_name -- app containing the model
* model_name -- model on which to apply the set operations (defaults to Article)
* view -- view to redirect to after the model instance list has been
constructed
* html_template -- HTML template used to render the result (defaults to 'pubman/tag.html')
"""
model_filter = TaggedItem.objects.filter(content_type__model=model_name)
search_string = r'\'[\w\s-]+\'|\"[\w\s-]+\"|[\w\s-]+|&|\||\(|\)|\^'
parse_string = re.findall(search_string, tag_expression)
querystring = ""
for token in parse_string:
if token in ['&', '|','^', '(',')']:
if token == '^':
token = '-'
querystring += ' ' + token + ' '
elif token == 'ALL':
querystring += ' set([i.id for i in get_model("' +\
app_name + '", "' +\
model_name + '")'+ '.objects.all()])'
else:
token = token.replace('"','')
token = token.replace("'","")
querystring += ' set([i.object_id for i in '+\
'model_filter.filter(tag__name="' + token + '")])'
try:
instances = eval(querystring)
except Exception:
# This is the fallback when there's an error in the expression.
# A better way might be to raise Http404.
instances = model_filter.filter(tag__name=tag_expression)
object_list = get_model(app_name, model_name).\
objects.all().filter(id__in=instances)
additional_context = {'tag_expression': tag_expression,}
return view(request,
object_list,
html_template,
additional_context=additional_context)
@csrf_protect
def article_detail(request, article_slug, article_lang=None):
'''Article view that also handles translations and stories. The latter are
specified using the story slug in the URL query line, e.g.
/article/a-day-in-the-life/?story=gulag-stories
Translations are indicated in the article_lang argument.
'''
article_obj = get_object_or_404(Article, slug=article_slug)
# Determine if user has permission to view this article
if not article_obj.permission_to_view_item(request.user):
raise Http404
if not article_obj.is_published():
messages.info(request, _('This article is unpublished, '
'but you have permission to view it'))
# Determine if user can moderate the comments on this article
if request.user.has_perm('comments.can_moderate'):
can_moderate_comments = True
else:
can_moderate_comments = False
if request.user.has_perm('pubman.change_article'):
if request.user.has_perm('pubman.edit_other_article'):
user_can_edit = True
else:
try:
article_obj.users_who_can_edit_this.get(id=request.user.id)
user_can_edit = True
except ObjectDoesNotExist:
user_can_edit = False
else:
user_can_edit = False
# Determine if comments are allowed for this article
if not article_obj.comments_allowed or\
(settings.COMMENTERS_MUST_BE_AUTHENTICATED and\
not request.user.is_authenticated()):
comments_allowed = False
else:
comments_allowed = True
# Process translations
translations = Translation.objects.filter(article__id=article_obj.id).\
filter(Translation.permission_to_view_Q(request.user, False))
available_languages = set_available_languages(settings.ALL_LANGUAGES,
translations,article_obj.language)
try:
if article_lang:
i = [t.language for t in translations].index(article_lang)
article_obj.title = translations[i].title
article_obj.subtitle = translations[i].subtitle
article_obj.blurb = translations[i].blurb
article_obj.article_text = translations[i].article_text
article_obj.text_format = translations[i].text_format
article_obj.primary_pullout_quote = translations[i].\
primary_pullout_quote
translator_list = translations[i].full_author_list()
translation_date = translations[i].date_published
translated = True
if not translations[i].is_published():
messages.info(request, _('This translation is unpublished, '
'but you have permission to view it'))
else:
article_lang = article_obj.language
translator_list = None
translation_date = None
translated = False
except ValueError:
raise Http404
# Remove shorten codes from blurbs
article_obj.blurb = article_obj.blurb.replace(settings.TEXT_SHORTEN_CODE, "\n")
# Process page breaks for article_text and put sliced up article into list
# This is not pythonic. Needs improving.
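# A more concise alternative (a sketch, not what runs below) would split on
# the page-break code instead of scanning with find():
#
#     article_pages = []
#     for chunk in article_obj.article_text.split(settings.PAGE_BREAK_CODE):
#         art = copy(article_obj)
#         art.article_text = chunk
#         article_pages.append(art)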
print_this = request.GET.get('print', 'false')
if print_this.strip().lower() == 'true':
print_this = True
else:
print_this = False
article_pages = []
if article_obj.page_break_strategy == 'C' and not print_this:
process_text = article_obj.article_text
index = process_text.find(settings.PAGE_BREAK_CODE)
while len(process_text) > 0:
art = copy(article_obj)
if index > -1:
art.article_text = process_text[0:index]
process_text = process_text[index+len(settings.PAGE_BREAK_CODE):]
index = process_text.find(settings.PAGE_BREAK_CODE)
else:
art.article_text = process_text
process_text = ""
article_pages.append(art)
else:
if print_this:
article_obj.article_text = article_obj.article_text.replace(settings.PAGE_BREAK_CODE,'')
article_pages = [article_obj]
# Must put something into article_pages, else the template might fall over
# when the article_text is referenced.
if len(article_pages)==0:
article_pages = [article_obj,]
paginator = Paginator(article_pages, 1)
# Make sure page request is an int. If not, deliver first page.
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request (9999) is out of range, deliver last page of results.
try:
article_page = paginator.page(page)
except (EmptyPage, InvalidPage):
article_page = paginator.page(paginator.num_pages)
# Process story
story_slug = request.GET.get('story', None)
if story_slug:
try:
story = Story.objects.get(slug=story_slug)
except ObjectDoesNotExist:
messages.info(request, _('There is no story called ') + story_slug)
story = None
else:
story_articles = Article.\
objects.filter(orderedarticle__story__id=story.id).distinct().\
order_by('orderedarticle__order')
if not request.user.has_perm('pubman.change_article') or\
not request.user.has_perm('pubman.edit_other_article'):
story_articles = story_articles.filter(publication_stage='P',
date_published__lte=datetime.datetime.now())
else:
story = None
story_articles = None
# Process further reading
further_reading = article_obj.further_reading.through.objects.filter(
article__id=article_obj.id)
return render_to_response('pubman/article_detail.html',
{'object' : article_obj,
'paginator' : paginator,
'page_obj': article_page,
'can_moderate_comments' : can_moderate_comments,
'comments_allowed': comments_allowed,
'user_can_edit': user_can_edit,
'print_this_button' : settings.PRINT_THIS,
'print_this' : print_this,
'story' : story,
'story_articles' : story_articles,
'available_languages' : available_languages,
'article_lang' : article_lang,
'translated': translated,
'translator_list': translator_list,
'translation_date': translation_date,
'further_reading' : further_reading
},
context_instance=RequestContext(request)
)
def article_detail_by_id(request, article_id):
'''Redirect to article_detail using slug
'''
article_obj = get_object_or_404(Article, id=article_id)
return redirect(article_detail, article_obj.slug)
def story_detail(request, story_slug):
'''View for story.
'''
story_obj = get_object_or_404(Story, slug=story_slug)
# Set pagination
if story_obj.number_of_articles_per_page == 0:
number_of_articles_per_page = 9999
elif story_obj.number_of_articles_per_page == -1:
number_of_articles_per_page = settings.ARTICLES_PER_PAGE
else:
number_of_articles_per_page = story_obj.number_of_articles_per_page
# Get all articles belonging to this story
article_list = Article.\
objects.filter(orderedarticle__story__id=story_obj.id).\
filter(publication_stage='P', date_published__lte=datetime.datetime.now()).\
distinct().\
order_by('orderedarticle__order')
# Set user_can_edit context variable if user has permission to
# edit this story
if request.user.has_perm('pubman.change_story'):
if request.user.has_perm('pubman.edit_other_story'):
user_can_edit = True
else:
try:
story_obj.users_who_can_edit_this.get(id=request.user.id)
user_can_edit = True
except ObjectDoesNotExist:
user_can_edit = False
else:
user_can_edit = False
return article_list_view(request,
article_list,
'pubman/story_detail.html',
published_only=False,
paginate_by=number_of_articles_per_page,
story=story_obj,
additional_context={
'user_can_edit' : user_can_edit,
})
def story_detail_by_id(request, story_id):
'''Redirects to story view using slug.
'''
story_obj = get_object_or_404(Story, id=story_id)
return redirect(story_detail, story_obj.slug)
######### Override of django-profile create and edit views
@login_required
def edit_profile(request):
'''Create or edit user profile view.
'''
if request.method == 'POST':
# The forms submitted by the client.
user_form = UserForm(request.POST, instance=request.user)
try:
profile_form = UserProfileForm(request.POST, request.FILES, instance=request.user.userprofile)
except ObjectDoesNotExist:
profile_form = UserProfileForm(request.POST, request.FILES)
if user_form.is_valid() and profile_form.is_valid():
# The forms validated correctly.
user_form.save()
# Attach the profile to the logged-in user before saving.
profile_form.instance.user = request.user
profile_form.save()
# Change this location as necessary.
return HttpResponseRedirect('/profiles/'+request.user.username)
else:
# Initialise the forms.
user_form = UserForm(instance=request.user)
try:
profile_form = UserProfileForm(instance=request.user.userprofile)
except ObjectDoesNotExist:
profile_form = UserProfileForm()
return render_to_response('profiles/edit_profile.html',
{'user_form': user_form, 'profile_form': profile_form,},
context_instance=RequestContext(request))
### Haystack raw search view
from haystack.query import SearchQuerySet
SEARCH_RESULTS_PER_PAGE = getattr(settings, 'HAYSTACK_SEARCH_RESULTS_PER_PAGE', 50)
def search(request):
'''View to do a haystack search of the website content.
Tested using Whoosh.
'''
try:
search_string = request.GET['q']
except KeyError:
search_string = ""
if search_string:
search_results = SearchQuerySet().filter(content=search_string)
paginator = Paginator(search_results, SEARCH_RESULTS_PER_PAGE)
page_range = paginator.page_range
# Make sure page request is an int. If not, deliver first page.
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request (9999) is out of range, deliver last page of results.
try:
object_list = paginator.page(page)
except (EmptyPage, InvalidPage):
object_list = paginator.page(paginator.num_pages)
else:
messages.info(request, _('Please enter something to search'))
object_list = None
page_range = None
page = None
return render_to_response('search/search.html', {
'query' : search_string,
'page' : object_list,
'page_range' : page_range,
'page_no' : page,
}, context_instance=RequestContext(request))
def markdownpreview(request):
'''Used by Markitup! editor to render the markdown for the preview button.
'''
from django.contrib.markup.templatetags.markup import markdown
data = markdown(request.POST.get('data', ''), settings.MARKDOWN_EXTENSIONS)
return render_to_response( 'pubman/markdownpreview.html',
{'preview': data,},
context_instance=RequestContext(request))
from django.contrib.auth.decorators import user_passes_test
@user_passes_test(lambda u: u.is_superuser)
def clear_cache(request):
from django.core.cache import cache
from django.http import HttpResponse
cache.clear()
return HttpResponse(_('The cache has been cleared.'))
| |
from contextlib import contextmanager
from functools import reduce
import sys
import threading
import numpy as np
from .cudadrv.devicearray import FakeCUDAArray, FakeWithinKernelCUDAArray
from .kernelapi import Dim3, FakeCUDAModule, swapped_cuda_module
from ..errors import normalize_kernel_dimensions
from ..args import wrap_arg, ArgHint
"""
Global variable to keep track of the current "kernel context", i.e the
FakeCUDAModule. We only support one kernel launch at a time.
No support for concurrent kernel launch.
"""
_kernel_context = None
@contextmanager
def _push_kernel_context(mod):
"""
Push the current kernel context.
"""
global _kernel_context
assert _kernel_context is None, "concurrent simulated kernel not supported"
_kernel_context = mod
try:
yield
finally:
_kernel_context = None
def _get_kernel_context():
"""
Get the current kernel context. This is usually done by a device function.
"""
return _kernel_context
class FakeDefinition:
'''
Used only to provide the max_cooperative_grid_blocks method
'''
def max_cooperative_grid_blocks(self, blockdim):
# We can only run one block in a cooperative grid because we have no
# mechanism for synchronization between different blocks
return 1
class FakeCUDAKernel(object):
'''
Wraps a @cuda.jit-ed function.
'''
def __init__(self, fn, device, fastmath=False, extensions=[]):
self.fn = fn
self._device = device
self._fastmath = fastmath
self.extensions = list(extensions) # defensive copy
# Initial configuration: grid unconfigured, stream 0, no dynamic shared
# memory.
self.grid_dim = None
self.block_dim = None
self.stream = 0
self.dynshared_size = 0
def __call__(self, *args):
if self._device:
with swapped_cuda_module(self.fn, _get_kernel_context()):
return self.fn(*args)
# Ensure we've been given a valid grid configuration
grid_dim, block_dim = normalize_kernel_dimensions(self.grid_dim,
self.block_dim)
fake_cuda_module = FakeCUDAModule(grid_dim, block_dim,
self.dynshared_size)
with _push_kernel_context(fake_cuda_module):
# fake_args substitutes all numpy arrays for FakeCUDAArrays
# because they implement some semantics differently
retr = []
def fake_arg(arg):
# map the arguments using any extension you've registered
_, arg = reduce(
lambda ty_val, extension: extension.prepare_args(
*ty_val,
stream=0,
retr=retr),
self.extensions,
(None, arg)
)
if isinstance(arg, np.ndarray) and arg.ndim > 0:
ret = wrap_arg(arg).to_device(retr)
elif isinstance(arg, ArgHint):
ret = arg.to_device(retr)
elif isinstance(arg, np.void):
ret = FakeCUDAArray(arg) # In case a np record comes in.
else:
ret = arg
if isinstance(ret, FakeCUDAArray):
return FakeWithinKernelCUDAArray(ret)
return ret
fake_args = [fake_arg(arg) for arg in args]
with swapped_cuda_module(self.fn, fake_cuda_module):
# Execute one block at a time
for grid_point in np.ndindex(*grid_dim):
bm = BlockManager(self.fn, grid_dim, block_dim)
bm.run(grid_point, *fake_args)
for wb in retr:
wb()
def __getitem__(self, configuration):
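'''
Emulate the kernel[griddim, blockdim] launch-configuration syntax. A
4-element configuration also carries a stream (ignored by the simulator)
and the dynamic shared memory size.
'''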
self.grid_dim, self.block_dim = \
normalize_kernel_dimensions(*configuration[:2])
if len(configuration) == 4:
self.dynshared_size = configuration[3]
return self
def bind(self):
pass
def specialize(self, *args):
return self
def forall(self, ntasks, tpb=0, stream=0, sharedmem=0):
if ntasks < 0:
raise ValueError("Can't create ForAll with negative task count: %s"
% ntasks)
return self[ntasks, 1, stream, sharedmem]
@property
def definition(self):
return FakeDefinition()
@property
def definitions(self):
msg = 'Multiple definitions are unsupported in the simulator'
raise NotImplementedError(msg)
@property
def ptx(self):
'''
Required in order to proceed through some tests, but serves no
functional purpose.
'''
res = '.const'
res += '\n.local'
if self._fastmath:
res += '\ndiv.full.ftz.f32'
return res
# Thread emulation
class BlockThread(threading.Thread):
'''
Manages the execution of a function for a single CUDA thread.
'''
def __init__(self, f, manager, blockIdx, threadIdx):
super(BlockThread, self).__init__(target=f)
self.syncthreads_event = threading.Event()
self.syncthreads_blocked = False
self._manager = manager
self.blockIdx = Dim3(*blockIdx)
self.threadIdx = Dim3(*threadIdx)
self.exception = None
self.daemon = True
self.abort = False
blockDim = Dim3(*self._manager._block_dim)
self.thread_id = self.threadIdx.x + (blockDim.x * (self.threadIdx.y +
blockDim.y *
self.threadIdx.z))
def run(self):
try:
super(BlockThread, self).run()
except Exception as e:
tid = 'tid=%s' % list(self.threadIdx)
ctaid = 'ctaid=%s' % list(self.blockIdx)
if str(e) == '':
msg = '%s %s' % (tid, ctaid)
else:
msg = '%s %s: %s' % (tid, ctaid, e)
tb = sys.exc_info()[2]
# Using `with_traceback` here would cause it to be mutated by
# future raise statements, which may or may not matter.
self.exception = (type(e)(msg), tb)
def syncthreads(self):
if self.abort:
raise RuntimeError("abort flag set on syncthreads call")
self.syncthreads_blocked = True
self.syncthreads_event.wait()
self.syncthreads_event.clear()
if self.abort:
raise RuntimeError("abort flag set on syncthreads clear")
def syncthreads_count(self, value):
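# Pattern shared by syncthreads_and/syncthreads_or: write this thread's
# value, barrier so every value is visible, compute the reduction, then
# barrier again so no thread overwrites block_state for a later call while
# others are still reading it.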
idx = self.threadIdx.x, self.threadIdx.y, self.threadIdx.z
self._manager.block_state[idx] = value
self.syncthreads()
count = np.count_nonzero(self._manager.block_state)
self.syncthreads()
return count
def syncthreads_and(self, value):
idx = self.threadIdx.x, self.threadIdx.y, self.threadIdx.z
self._manager.block_state[idx] = value
self.syncthreads()
test = np.all(self._manager.block_state)
self.syncthreads()
return 1 if test else 0
def syncthreads_or(self, value):
idx = self.threadIdx.x, self.threadIdx.y, self.threadIdx.z
self._manager.block_state[idx] = value
self.syncthreads()
test = np.any(self._manager.block_state)
self.syncthreads()
return 1 if test else 0
def __str__(self):
return 'Thread <<<%s, %s>>>' % (self.blockIdx, self.threadIdx)
class BlockManager(object):
'''
Manages the execution of a thread block.
When run() is called, all threads are started. Each thread executes until it
hits syncthreads(), at which point it sets its own syncthreads_blocked to
True so that the BlockManager knows it is blocked. It then waits on its
syncthreads_event.
The BlockManager polls threads to determine if they are blocked in
syncthreads(). If it finds a blocked thread, it adds it to the set of
blocked threads. When all threads are blocked, it unblocks all the threads.
The threads are unblocked by setting their syncthreads_blocked back to False
and setting their syncthreads_event.
The polling continues until no threads are alive, when execution is
complete.
'''
def __init__(self, f, grid_dim, block_dim):
self._grid_dim = grid_dim
self._block_dim = block_dim
self._f = f
self.block_state = np.zeros(block_dim, dtype=np.bool_)
def run(self, grid_point, *args):
# Create all threads
threads = set()
livethreads = set()
blockedthreads = set()
for block_point in np.ndindex(*self._block_dim):
def target():
self._f(*args)
t = BlockThread(target, self, grid_point, block_point)
t.start()
threads.add(t)
livethreads.add(t)
# Potential optimisations:
# 1. Continue the while loop immediately after finding a blocked thread
# 2. Don't poll already-blocked threads
while livethreads:
for t in livethreads:
if t.syncthreads_blocked:
blockedthreads.add(t)
elif t.exception:
# Abort all other simulator threads on exception,
# do *not* join immediately to facilitate debugging.
for t_other in threads:
t_other.abort = True
t_other.syncthreads_blocked = False
t_other.syncthreads_event.set()
raise t.exception[0].with_traceback(t.exception[1])
if livethreads == blockedthreads:
for t in blockedthreads:
t.syncthreads_blocked = False
t.syncthreads_event.set()
blockedthreads = set()
livethreads = set([ t for t in livethreads if t.is_alive() ])
# Final check for exceptions in case any were set prior to thread
# finishing, before we could check it
for t in threads:
if t.exception:
raise t.exception[0].with_traceback(t.exception[1])
| |
"""Class to hold all camera accessories."""
import asyncio
from datetime import timedelta
import logging
from haffmpeg.core import HAFFmpeg
from pyhap.camera import (
STREAMING_STATUS,
VIDEO_CODEC_PARAM_LEVEL_TYPES,
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES,
Camera as PyhapCamera,
)
from pyhap.const import CATEGORY_CAMERA
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.event import (
async_track_state_change,
async_track_time_interval,
)
from homeassistant.util import get_local_ip
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_MOTION_DETECTED,
CHAR_STREAMING_STRATUS,
CONF_AUDIO_CODEC,
CONF_AUDIO_MAP,
CONF_AUDIO_PACKET_SIZE,
CONF_LINKED_MOTION_SENSOR,
CONF_MAX_FPS,
CONF_MAX_HEIGHT,
CONF_MAX_WIDTH,
CONF_STREAM_ADDRESS,
CONF_STREAM_SOURCE,
CONF_SUPPORT_AUDIO,
CONF_VIDEO_CODEC,
CONF_VIDEO_MAP,
CONF_VIDEO_PACKET_SIZE,
DEFAULT_AUDIO_CODEC,
DEFAULT_AUDIO_MAP,
DEFAULT_AUDIO_PACKET_SIZE,
DEFAULT_MAX_FPS,
DEFAULT_MAX_HEIGHT,
DEFAULT_MAX_WIDTH,
DEFAULT_SUPPORT_AUDIO,
DEFAULT_VIDEO_CODEC,
DEFAULT_VIDEO_MAP,
DEFAULT_VIDEO_PACKET_SIZE,
SERV_CAMERA_RTP_STREAM_MANAGEMENT,
SERV_MOTION_SENSOR,
)
from .img_util import scale_jpeg_camera_image
from .util import pid_is_alive
_LOGGER = logging.getLogger(__name__)
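# ffmpeg argument templates for the SRTP video and audio output legs of a
# HomeKit stream; the placeholders are filled in start_stream() from the
# negotiated stream_config plus this accessory's configuration.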
VIDEO_OUTPUT = (
"-map {v_map} -an "
"-c:v {v_codec} "
"{v_profile}"
"-tune zerolatency -pix_fmt yuv420p "
"-r {fps} "
"-b:v {v_max_bitrate}k -bufsize {v_bufsize}k -maxrate {v_max_bitrate}k "
"-payload_type 99 "
"-ssrc {v_ssrc} -f rtp "
"-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {v_srtp_key} "
"srtp://{address}:{v_port}?rtcpport={v_port}&"
"localrtcpport={v_port}&pkt_size={v_pkt_size}"
)
AUDIO_OUTPUT = (
"-map {a_map} -vn "
"-c:a {a_encoder} "
"{a_application}"
"-ac 1 -ar {a_sample_rate}k "
"-b:a {a_max_bitrate}k -bufsize {a_bufsize}k "
"-payload_type 110 "
"-ssrc {a_ssrc} -f rtp "
"-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {a_srtp_key} "
"srtp://{address}:{a_port}?rtcpport={a_port}&"
"localrtcpport={a_port}&pkt_size={a_pkt_size}"
)
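# Low-framerate variants of the smallest resolutions; they are only offered
# when the configured maximum fps is above 15 (see the filter in
# Camera.__init__).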
SLOW_RESOLUTIONS = [
(320, 180, 15),
(320, 240, 15),
]
RESOLUTIONS = [
(320, 180),
(320, 240),
(480, 270),
(480, 360),
(640, 360),
(640, 480),
(1024, 576),
(1024, 768),
(1280, 720),
(1280, 960),
(1920, 1080),
]
VIDEO_PROFILE_NAMES = ["baseline", "main", "high"]
FFMPEG_WATCH_INTERVAL = timedelta(seconds=5)
FFMPEG_WATCHER = "ffmpeg_watcher"
FFMPEG_PID = "ffmpeg_pid"
SESSION_ID = "session_id"
CONFIG_DEFAULTS = {
CONF_SUPPORT_AUDIO: DEFAULT_SUPPORT_AUDIO,
CONF_MAX_WIDTH: DEFAULT_MAX_WIDTH,
CONF_MAX_HEIGHT: DEFAULT_MAX_HEIGHT,
CONF_MAX_FPS: DEFAULT_MAX_FPS,
CONF_AUDIO_CODEC: DEFAULT_AUDIO_CODEC,
CONF_AUDIO_MAP: DEFAULT_AUDIO_MAP,
CONF_VIDEO_MAP: DEFAULT_VIDEO_MAP,
CONF_VIDEO_CODEC: DEFAULT_VIDEO_CODEC,
CONF_AUDIO_PACKET_SIZE: DEFAULT_AUDIO_PACKET_SIZE,
CONF_VIDEO_PACKET_SIZE: DEFAULT_VIDEO_PACKET_SIZE,
}
@TYPES.register("Camera")
class Camera(HomeAccessory, PyhapCamera):
"""Generate a Camera accessory."""
def __init__(self, hass, driver, name, entity_id, aid, config):
"""Initialize a Camera accessory object."""
self._ffmpeg = hass.data[DATA_FFMPEG]
self._cur_session = None
for config_key in CONFIG_DEFAULTS:
if config_key not in config:
config[config_key] = CONFIG_DEFAULTS[config_key]
max_fps = config[CONF_MAX_FPS]
max_width = config[CONF_MAX_WIDTH]
max_height = config[CONF_MAX_HEIGHT]
resolutions = [
(w, h, fps)
for w, h, fps in SLOW_RESOLUTIONS
if w <= max_width and h <= max_height and fps < max_fps
] + [
(w, h, max_fps)
for w, h in RESOLUTIONS
if w <= max_width and h <= max_height
]
video_options = {
"codec": {
"profiles": [
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["BASELINE"],
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["MAIN"],
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["HIGH"],
],
"levels": [
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE3_1"],
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE3_2"],
VIDEO_CODEC_PARAM_LEVEL_TYPES["TYPE4_0"],
],
},
"resolutions": resolutions,
}
audio_options = {"codecs": [{"type": "OPUS", "samplerate": 24}]}
stream_address = config.get(CONF_STREAM_ADDRESS, get_local_ip())
options = {
"video": video_options,
"audio": audio_options,
"address": stream_address,
"srtp": True,
}
super().__init__(
hass,
driver,
name,
entity_id,
aid,
config,
category=CATEGORY_CAMERA,
options=options,
)
self._char_motion_detected = None
self.linked_motion_sensor = self.config.get(CONF_LINKED_MOTION_SENSOR)
if not self.linked_motion_sensor:
return
state = self.hass.states.get(self.linked_motion_sensor)
if not state:
return
serv_motion = self.add_preload_service(SERV_MOTION_SENSOR)
self._char_motion_detected = serv_motion.configure_char(
CHAR_MOTION_DETECTED, value=False
)
self._async_update_motion_state(None, None, state)
async def run_handler(self):
"""Handle accessory driver started event.
Run inside the Home Assistant event loop.
"""
if self._char_motion_detected:
async_track_state_change(
self.hass, self.linked_motion_sensor, self._async_update_motion_state
)
await super().run_handler()
@callback
def _async_update_motion_state(
self, entity_id=None, old_state=None, new_state=None
):
"""Handle link motion sensor state change to update HomeKit value."""
detected = new_state.state == STATE_ON
if self._char_motion_detected.value == detected:
return
self._char_motion_detected.set_value(detected)
_LOGGER.debug(
"%s: Set linked motion %s sensor to %d",
self.entity_id,
self.linked_motion_sensor,
detected,
)
@callback
def async_update_state(self, new_state):
"""Handle state change to update HomeKit value."""
pass # pylint: disable=unnecessary-pass
async def _async_get_stream_source(self):
"""Find the camera stream source url."""
stream_source = self.config.get(CONF_STREAM_SOURCE)
if stream_source:
return stream_source
try:
stream_source = await self.hass.components.camera.async_get_stream_source(
self.entity_id
)
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"Failed to get stream source - this could be a transient error or your camera might not be compatible with HomeKit yet"
)
if stream_source:
self.config[CONF_STREAM_SOURCE] = stream_source
return stream_source
async def start_stream(self, session_info, stream_config):
"""Start a new stream with the given configuration."""
_LOGGER.debug(
"[%s] Starting stream with the following parameters: %s",
session_info["id"],
stream_config,
)
input_source = await self._async_get_stream_source()
if not input_source:
_LOGGER.error("Camera has no stream source")
return False
if "-i " not in input_source:
input_source = "-i " + input_source
video_profile = ""
if self.config[CONF_VIDEO_CODEC] != "copy":
video_profile = (
"-profile:v "
+ VIDEO_PROFILE_NAMES[
int.from_bytes(stream_config["v_profile_id"], byteorder="big")
]
+ " "
)
audio_application = ""
if self.config[CONF_AUDIO_CODEC] == "libopus":
audio_application = "-application lowdelay "
output_vars = stream_config.copy()
output_vars.update(
{
"v_profile": video_profile,
"v_bufsize": stream_config["v_max_bitrate"] * 4,
"v_map": self.config[CONF_VIDEO_MAP],
"v_pkt_size": self.config[CONF_VIDEO_PACKET_SIZE],
"v_codec": self.config[CONF_VIDEO_CODEC],
"a_bufsize": stream_config["a_max_bitrate"] * 4,
"a_map": self.config[CONF_AUDIO_MAP],
"a_pkt_size": self.config[CONF_AUDIO_PACKET_SIZE],
"a_encoder": self.config[CONF_AUDIO_CODEC],
"a_application": audio_application,
}
)
output = VIDEO_OUTPUT.format(**output_vars)
if self.config[CONF_SUPPORT_AUDIO]:
output = output + " " + AUDIO_OUTPUT.format(**output_vars)
_LOGGER.debug("FFmpeg output settings: %s", output)
stream = HAFFmpeg(self._ffmpeg.binary, loop=self.driver.loop)
opened = await stream.open(
cmd=[], input_source=input_source, output=output, stdout_pipe=False
)
if not opened:
_LOGGER.error("Failed to open ffmpeg stream")
return False
session_info["stream"] = stream
_LOGGER.info(
"[%s] Started stream process - PID %d",
session_info["id"],
stream.process.pid,
)
ffmpeg_watcher = async_track_time_interval(
self.hass, self._async_ffmpeg_watch, FFMPEG_WATCH_INTERVAL
)
self._cur_session = {
FFMPEG_WATCHER: ffmpeg_watcher,
FFMPEG_PID: stream.process.pid,
SESSION_ID: session_info["id"],
}
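# Run the watcher once immediately so a stream that dies right away is
# reported as a failed start; the argument stands in for the "now"
# timestamp that async_track_time_interval would normally pass.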
return await self._async_ffmpeg_watch(0)
async def _async_ffmpeg_watch(self, _):
"""Check to make sure ffmpeg is still running and cleanup if not."""
ffmpeg_pid = self._cur_session[FFMPEG_PID]
session_id = self._cur_session[SESSION_ID]
if pid_is_alive(ffmpeg_pid):
return True
_LOGGER.warning("Streaming process ended unexpectedly - PID %d", ffmpeg_pid)
self._async_stop_ffmpeg_watch()
self._async_set_streaming_available(session_id)
return False
@callback
def _async_stop_ffmpeg_watch(self):
"""Cleanup a streaming session after stopping."""
if not self._cur_session:
return
self._cur_session[FFMPEG_WATCHER]()
self._cur_session = None
@callback
def _async_set_streaming_available(self, session_id):
"""Free the session so they can start another."""
self.streaming_status = STREAMING_STATUS["AVAILABLE"]
self.get_service(SERV_CAMERA_RTP_STREAM_MANAGEMENT).get_characteristic(
CHAR_STREAMING_STRATUS
).notify()
async def stop_stream(self, session_info):
"""Stop the stream for the given ``session_id``."""
session_id = session_info["id"]
stream = session_info.get("stream")
if not stream:
_LOGGER.debug("No stream for session ID %s", session_id)
return
self._async_stop_ffmpeg_watch()
if not pid_is_alive(stream.process.pid):
_LOGGER.info("[%s] Stream already stopped.", session_id)
return True
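# Try a graceful close first and fall back to kill, returning as soon as
# either succeeds.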
for shutdown_method in ["close", "kill"]:
_LOGGER.info("[%s] %s stream.", session_id, shutdown_method)
try:
await getattr(stream, shutdown_method)()
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception(
"[%s] Failed to %s stream.", session_id, shutdown_method
)
async def reconfigure_stream(self, session_info, stream_config):
"""Reconfigure the stream so that it uses the given ``stream_config``."""
return True
def get_snapshot(self, image_size):
"""Return a jpeg of a snapshot from the camera."""
return scale_jpeg_camera_image(
asyncio.run_coroutine_threadsafe(
self.hass.components.camera.async_get_image(self.entity_id),
self.hass.loop,
).result(),
image_size["image-width"],
image_size["image-height"],
)
| |
import inspect
import uuid
from properties import Properties
from weakref import WeakKeyDictionary
class PostgreSQLTypes(object):
char = 'CHAR'
boolean = 'BOOLEAN'
int = 'INT'
bigint = 'BIGINT'
real = 'REAL'
def make_this_descriptor(desc_class):
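"""Class decorator that turns a property class into a data descriptor by
attaching __get__/__set__. Values are stored per model instance in the
property's WeakKeyDictionary, so instances can still be garbage collected.
"""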
desc_class.__get__ = dunder_get
desc_class.__set__ = dunder_set
return desc_class
def dunder_get(self, instance, instancetype):
return self.data.get(instance, self.value)
def dunder_set(self, instance, new_value):
self._validate(new_value)
self.data[instance] = new_value
class BaseProperty(object):
def __init__(self):
self.property_name = self.__class__.__name__
self.data = WeakKeyDictionary()
self.value = None
def _validate(self, value):
pass
def _type(self):
raise NotImplementedError
def __repr__(self):
return str(self.__class__)
@make_this_descriptor
class StringProperty(BaseProperty):
def __init__(self, size=512):
super(StringProperty, self).__init__()
self._size = size
def _validate(self, value):
if value and not (isinstance(value, basestring)):
raise ValueError("The value " + self.property_name +
" must be a string, but was: " + str(value))
def _type(self):
return '%s(%s)' % (PostgreSQLTypes.char, self._size)
@make_this_descriptor
class BooleanProperty(BaseProperty):
def _validate(self, value):
if value and not (isinstance(value, bool)):
raise ValueError("The value must be bool")
def _type(self):
return PostgreSQLTypes.boolean
@make_this_descriptor
class EnumProperty(BaseProperty):
def __init__(self, cls):
super(EnumProperty, self).__init__()
self.enum_type = cls
def _validate(self, value):
if value and not self.enum_type.exist(value):
raise ValueError("This EnumProperty:" + value +
" is not valid to EnumType:" +
self.enum_type.__name__)
def _type(self):
typeof = type(self.enum_type.values()[0]).__name__
return {'str': PostgreSQLTypes.char, 'int': PostgreSQLTypes.int}[typeof]
@make_this_descriptor
class IntegerProperty(BaseProperty):
def _validate(self, value):
if value and not (isinstance(value, int) or isinstance(value, long)):
raise ValueError("The value " +
self.property_name + " must be a integer")
def _type(self):
return PostgreSQLTypes.int
@make_this_descriptor
class BigIntegerProperty(IntegerProperty):
def _type(self):
return PostgreSQLTypes.bigint
@make_this_descriptor
class FloatProperty(BaseProperty):
def _type(self):
return PostgreSQLTypes.real
class ModelBasicProtocol(type):
@staticmethod
def _mount_property_details(class_dict):
description = {}
for attribute in class_dict:
if isinstance(class_dict[attribute], BaseProperty):
# set the name of the column to the column object
class_dict[attribute].property_name = attribute
description[attribute] = class_dict[attribute]
return description
def __new__(cls, name, parents, dct):
description = {}
# add to _properties_details all properties that the classes
# that this Property inherity.
for parent in parents:
desc = ModelBasicProtocol._mount_property_details(parent.__dict__)
description.update(desc)
# add to _properties_details all properties that this class has
description.update(ModelBasicProtocol._mount_property_details(dct))
dct["_properties_details"] = description
if '__init__' in dct:
init_method = dct['__init__']
init_inspection = inspect.getargspec(init_method)
if init_inspection.defaults is None \
and len(init_inspection.args) > 1:
raise Exception('A model object must accept '
'keyword arguments in __init__')
return type.__new__(cls, name, parents, dct)
def generate_id():
return uuid.uuid4().hex
class BaseModel(object):
__metaclass__ = ModelBasicProtocol
uuid = StringProperty()
def __init__(self, *args, **kargs):
for k, value in kargs.items():
if k in dir(self):
setattr(self, k, value)
def to_json(self):
return_json = {}
all_attributes = dir(self)
for key, _ in sorted(self._properties_details.items()):
if key in all_attributes:
value = getattr(self, key)
#if not isinstance(value, Properties):
return_json[key] = value
return return_json
@classmethod
def to_instance(cls, json):
return cls(**json)
@classmethod
def table_name(cls):
return 'tb_%s' % (cls.__name__,)
@classmethod
def columns(cls):
return [column_name for column_name, column_class in sorted(cls._properties_details.items())]
@classmethod
def create_table_sql(cls):
uuid = "uuid %s(36) PRIMARY KEY," % (PostgreSQLTypes.char)
fields = ""
for prop, prop_type in sorted(cls._properties_details.items()):
if prop == 'uuid':
continue
fields += "%s %s, " % (prop, prop_type._type())
fields = fields[0:-2]
return "CREATE TABLE %s (%s %s);" % (cls.table_name(), uuid, fields,)
| |
"""Google config for Cloud."""
import asyncio
import logging
from hass_nabucasa import Cloud, cloud_api
from hass_nabucasa.google_report_state import ErrorResponse
from homeassistant.components.google_assistant.const import DOMAIN as GOOGLE_DOMAIN
from homeassistant.components.google_assistant.helpers import AbstractConfig
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES, HTTP_OK
from homeassistant.core import CoreState, split_entity_id
from homeassistant.helpers import entity_registry, start
from homeassistant.setup import async_setup_component
from .const import (
CONF_ENTITY_CONFIG,
DEFAULT_DISABLE_2FA,
PREF_DISABLE_2FA,
PREF_SHOULD_EXPOSE,
)
from .prefs import CloudPreferences
_LOGGER = logging.getLogger(__name__)
class CloudGoogleConfig(AbstractConfig):
"""HA Cloud Configuration for Google Assistant."""
def __init__(
self, hass, config, cloud_user: str, prefs: CloudPreferences, cloud: Cloud
):
"""Initialize the Google config."""
super().__init__(hass)
self._config = config
self._user = cloud_user
self._prefs = prefs
self._cloud = cloud
self._cur_entity_prefs = self._prefs.google_entity_configs
self._cur_default_expose = self._prefs.google_default_expose
self._sync_entities_lock = asyncio.Lock()
self._sync_on_started = False
@property
def enabled(self):
"""Return if Google is enabled."""
return (
self._cloud.is_logged_in
and not self._cloud.subscription_expired
and self._prefs.google_enabled
)
@property
def entity_config(self):
"""Return entity config."""
return self._config.get(CONF_ENTITY_CONFIG) or {}
@property
def secure_devices_pin(self):
"""Return entity config."""
return self._prefs.google_secure_devices_pin
@property
def should_report_state(self):
"""Return if states should be proactively reported."""
return self.enabled and self._prefs.google_report_state
@property
def local_sdk_webhook_id(self):
"""Return the local SDK webhook.
Return None to disable the local SDK.
"""
return self._prefs.google_local_webhook_id
@property
def local_sdk_user_id(self):
"""Return the user ID to be used for actions received via the local SDK."""
return self._user
@property
def cloud_user(self):
"""Return Cloud User account."""
return self._user
async def async_initialize(self):
"""Perform async initialization of config."""
await super().async_initialize()
async def hass_started(hass):
if self.enabled and GOOGLE_DOMAIN not in self.hass.config.components:
await async_setup_component(self.hass, GOOGLE_DOMAIN, {})
start.async_at_start(self.hass, hass_started)
# Remove old/wrong user agent ids
remove_agent_user_ids = []
for agent_user_id in self._store.agent_user_ids:
if agent_user_id != self.agent_user_id:
remove_agent_user_ids.append(agent_user_id)
for agent_user_id in remove_agent_user_ids:
await self.async_disconnect_agent_user(agent_user_id)
self._prefs.async_listen_updates(self._async_prefs_updated)
self.hass.bus.async_listen(
entity_registry.EVENT_ENTITY_REGISTRY_UPDATED,
self._handle_entity_registry_updated,
)
def should_expose(self, state):
"""If a state object should be exposed."""
return self._should_expose_entity_id(state.entity_id)
def _should_expose_entity_id(self, entity_id):
"""If an entity ID should be exposed."""
if entity_id in CLOUD_NEVER_EXPOSED_ENTITIES:
return False
if not self._config["filter"].empty_filter:
return self._config["filter"](entity_id)
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(entity_id, {})
entity_expose = entity_config.get(PREF_SHOULD_EXPOSE)
if entity_expose is not None:
return entity_expose
default_expose = self._prefs.google_default_expose
# Backwards compat
if default_expose is None:
return True
return split_entity_id(entity_id)[0] in default_expose
@property
def agent_user_id(self):
"""Return Agent User Id to use for query responses."""
return self._cloud.username
@property
def has_registered_user_agent(self):
"""Return if we have a Agent User Id registered."""
return len(self._store.agent_user_ids) > 0
def get_agent_user_id(self, context):
"""Get agent user ID making request."""
return self.agent_user_id
def should_2fa(self, state):
"""If an entity should be checked for 2FA."""
entity_configs = self._prefs.google_entity_configs
entity_config = entity_configs.get(state.entity_id, {})
return not entity_config.get(PREF_DISABLE_2FA, DEFAULT_DISABLE_2FA)
async def async_report_state(self, message, agent_user_id: str):
"""Send a state report to Google."""
try:
await self._cloud.google_report_state.async_send_message(message)
except ErrorResponse as err:
_LOGGER.warning("Error reporting state - %s: %s", err.code, err.message)
async def _async_request_sync_devices(self, agent_user_id: str):
"""Trigger a sync with Google."""
if self._sync_entities_lock.locked():
return HTTP_OK
async with self._sync_entities_lock:
resp = await cloud_api.async_google_actions_request_sync(self._cloud)
return resp.status
async def _async_prefs_updated(self, prefs):
"""Handle updated preferences."""
if self.enabled and GOOGLE_DOMAIN not in self.hass.config.components:
await async_setup_component(self.hass, GOOGLE_DOMAIN, {})
if self.should_report_state != self.is_reporting_state:
if self.should_report_state:
self.async_enable_report_state()
else:
self.async_disable_report_state()
# State reporting is reported as a property on entities.
# So when we change it, we need to sync all entities.
await self.async_sync_entities_all()
# If entity prefs are the same or we have filter in config.yaml,
# don't sync.
elif (
self._cur_entity_prefs is not prefs.google_entity_configs
or self._cur_default_expose is not prefs.google_default_expose
) and self._config["filter"].empty_filter:
self.async_schedule_google_sync_all()
if self.enabled and not self.is_local_sdk_active:
self.async_enable_local_sdk()
elif not self.enabled and self.is_local_sdk_active:
self.async_disable_local_sdk()
self._cur_entity_prefs = prefs.google_entity_configs
self._cur_default_expose = prefs.google_default_expose
async def _handle_entity_registry_updated(self, event):
"""Handle when entity registry updated."""
if not self.enabled or not self._cloud.is_logged_in:
return
# Only consider entity registry updates if info relevant for Google has changed
if event.data["action"] == "update" and not bool(
set(event.data["changes"]) & entity_registry.ENTITY_DESCRIBING_ATTRIBUTES
):
return
entity_id = event.data["entity_id"]
if not self._should_expose_entity_id(entity_id):
return
if self.hass.state != CoreState.running:
return
self.async_schedule_google_sync_all()
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
from decimal import Decimal
from itertools import product
from test_framework.descriptors import descsum_create
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_approx,
assert_equal,
assert_fee_amount,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises_rpc_error,
count_bytes,
find_vout_for_address,
)
def get_unspent(listunspent, amount):
for utx in listunspent:
if utx['amount'] == amount:
return utx
raise AssertionError('Could not find unspent with amount={}'.format(amount))
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# This test isn't testing tx relay. Set whitelist on the peers for
# instant tx relay.
self.extra_args = [['-whitelist=noban@127.0.0.1']] * self.num_nodes
self.rpc_timeout = 90 # to prevent timeouts in `test_transaction_too_large`
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
self.connect_nodes(0, 1)
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
self.connect_nodes(0, 3)
def run_test(self):
self.log.info("Connect nodes, set fees, generate blocks, and sync")
self.min_relay_tx_fee = self.nodes[0].getnetworkinfo()['relayfee']
# This test is not meant to test fee estimation and we'd like
# to be sure all txs are sent at a consistent desired feerate
for node in self.nodes:
node.settxfee(self.min_relay_tx_fee)
# If the fee's positive delta is higher than this value, tests will fail;
# a negative delta always fails the tests.
# The size of the signature of every input may be at most 2 bytes larger
# than a minimum sized signature.
# = 2 bytes * minRelayTxFeePerByte
self.fee_tolerance = 2 * self.min_relay_tx_fee / 1000
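# Mine one block with node 2, then 121 with node 0: coinbase outputs need
# 100 confirmations before they can be spent, which leaves node 2's reward
# mature and gives node 0 plenty of spendable coins.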
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(121)
self.sync_all()
self.test_change_position()
self.test_simple()
self.test_simple_two_coins()
self.test_simple_two_outputs()
self.test_change()
self.test_no_change()
self.test_invalid_option()
self.test_invalid_change_address()
self.test_valid_change_address()
self.test_change_type()
self.test_coin_selection()
self.test_two_vin()
self.test_two_vin_two_vout()
self.test_invalid_input()
self.test_fee_p2pkh()
self.test_fee_p2pkh_multi_out()
self.test_fee_p2sh()
self.test_fee_4of5()
self.test_spend_2of2()
self.test_locked_wallet()
self.test_many_inputs_fee()
self.test_many_inputs_send()
self.test_op_return()
self.test_watchonly()
self.test_all_watched_funds()
self.test_option_feerate()
self.test_address_reuse()
self.test_option_subtract_fee_from_outputs()
self.test_subtract_fee_with_presets()
self.test_transaction_too_large()
self.test_include_unsafe()
def test_change_position(self):
"""Ensure setting changePosition in fundraw with an exact match is handled properly."""
self.log.info("Test fundrawtxn changePosition option")
rawmatch = self.nodes[2].createrawtransaction([], {self.nodes[2].getnewaddress():50})
rawmatch = self.nodes[2].fundrawtransaction(rawmatch, {"changePosition":1, "subtractFeeFromOutputs":[0]})
assert_equal(rawmatch["changepos"], -1)
self.nodes[3].createwallet(wallet_name="wwatch", disable_private_keys=True)
wwatch = self.nodes[3].get_wallet_rpc('wwatch')
watchonly_address = self.nodes[0].getnewaddress()
watchonly_pubkey = self.nodes[0].getaddressinfo(watchonly_address)["pubkey"]
self.watchonly_amount = Decimal(200)
wwatch.importpubkey(watchonly_pubkey, "", True)
self.watchonly_txid = self.nodes[0].sendtoaddress(watchonly_address, self.watchonly_amount)
# Lock UTXO so nodes[0] doesn't accidentally spend it
self.watchonly_vout = find_vout_for_address(self.nodes[0], self.watchonly_txid, watchonly_address)
self.nodes[0].lockunspent(False, [{"txid": self.watchonly_txid, "vout": self.watchonly_vout}])
self.nodes[0].sendtoaddress(self.nodes[3].get_wallet_rpc(self.default_wallet_name).getnewaddress(), self.watchonly_amount / 10)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 5.0)
self.nodes[0].generate(1)
self.sync_all()
wwatch.unloadwallet()
def test_simple(self):
self.log.info("Test fundrawtxn")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert len(dec_tx['vin']) > 0 #test that we have enough inputs
def test_simple_two_coins(self):
self.log.info("Test fundrawtxn with 2 coins")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.2 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert len(dec_tx['vin']) > 0 #test if we have enough inputs
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
def test_simple_two_outputs(self):
self.log.info("Test fundrawtxn with 2 outputs")
inputs = [ ]
outputs = { self.nodes[0].getnewaddress() : 2.6, self.nodes[1].getnewaddress() : 2.5 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert len(dec_tx['vin']) > 0
assert_equal(dec_tx['vin'][0]['scriptSig']['hex'], '')
def test_change(self):
self.log.info("Test fundrawtxn with a vin > required amount")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
self.test_no_change_fee = fee # Use the same fee for the next tx
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
def test_no_change(self):
self.log.info("Test fundrawtxn not having a change output")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = {self.nodes[0].getnewaddress(): Decimal(5.0) - self.test_no_change_fee - self.fee_tolerance}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
fee = rawtxfund['fee']
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
for out in dec_tx['vout']:
totalOut += out['value']
assert_equal(rawtxfund['changepos'], -1)
assert_equal(fee + totalOut, utx['amount']) #compare vin total and totalout+fee
def test_invalid_option(self):
self.log.info("Test fundrawtxn with an invalid option")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-3, "Unexpected key foo", self.nodes[2].fundrawtransaction, rawtx, {'foo':'bar'})
# reserveChangeKey was deprecated and is now removed
assert_raises_rpc_error(-3, "Unexpected key reserveChangeKey", lambda: self.nodes[2].fundrawtransaction(hexstring=rawtx, options={'reserveChangeKey': True}))
def test_invalid_change_address(self):
self.log.info("Test fundrawtxn with an invalid change address")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_raises_rpc_error(-5, "Change address must be a valid bitcoin address", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':'foobar'})
def test_valid_change_address(self):
self.log.info("Test fundrawtxn with a provided change address")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
change = self.nodes[2].getnewaddress()
assert_raises_rpc_error(-8, "changePosition out of bounds", self.nodes[2].fundrawtransaction, rawtx, {'changeAddress':change, 'changePosition':2})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {'changeAddress': change, 'changePosition': 0})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
out = dec_tx['vout'][0]
assert_equal(change, out['scriptPubKey']['address'])
def test_change_type(self):
self.log.info("Test fundrawtxn with a provided change type")
utx = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']} ]
outputs = { self.nodes[0].getnewaddress() : Decimal(4.0) }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[2].fundrawtransaction, rawtx, {'change_type': None})
assert_raises_rpc_error(-5, "Unknown change type ''", self.nodes[2].fundrawtransaction, rawtx, {'change_type': ''})
rawtx = self.nodes[2].fundrawtransaction(rawtx, {'change_type': 'bech32'})
dec_tx = self.nodes[2].decoderawtransaction(rawtx['hex'])
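# A bech32 change address yields a P2WPKH output, which
# decoderawtransaction reports as the 'witness_v0_keyhash' script type.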
assert_equal('witness_v0_keyhash', dec_tx['vout'][rawtx['changepos']]['scriptPubKey']['type'])
def test_coin_selection(self):
self.log.info("Test fundrawtxn with a vin < required amount")
utx = get_unspent(self.nodes[2].listunspent(), 1)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']}]
outputs = { self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
# 4-byte version + 1-byte vin count + 36-byte prevout then script_len
rawtx = rawtx[:82] + "0100" + rawtx[84:]
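# Offset arithmetic for the splice above (hex chars, 2 per byte):
# version (4 bytes -> chars 0..8) + vin count (1 byte -> 8..10) +
# prevout txid (32 bytes -> 10..74) + prevout index (4 bytes -> 74..82)
# puts the scriptSig length byte at chars 82..84; "0100" replaces it with
# length 0x01 followed by the single scriptSig byte 0x00 checked below.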
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
# Should fail without add_inputs:
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx, {"add_inputs": False})
# add_inputs is enabled by default
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for i, out in enumerate(dec_tx['vout']):
totalOut += out['value']
if out['scriptPubKey']['address'] in outputs:
matchingOuts+=1
else:
assert_equal(i, rawtxfund['changepos'])
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
assert_equal("00", dec_tx['vin'][0]['scriptSig']['hex'])
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
def test_two_vin(self):
self.log.info("Test fundrawtxn with 2 vins")
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
# Should fail without add_inputs:
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx, {"add_inputs": False})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {"add_inputs": True})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['address'] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 1)
assert_equal(len(dec_tx['vout']), 2)
matchingIns = 0
for vinOut in dec_tx['vin']:
for vinIn in inputs:
if vinIn['txid'] == vinOut['txid']:
matchingIns+=1
assert_equal(matchingIns, 2) #we should see exactly the two vins we passed in as params
def test_two_vin_two_vout(self):
self.log.info("Test fundrawtxn with 2 vins and 2 vouts")
utx = get_unspent(self.nodes[2].listunspent(), 1)
utx2 = get_unspent(self.nodes[2].listunspent(), 5)
inputs = [ {'txid' : utx['txid'], 'vout' : utx['vout']},{'txid' : utx2['txid'], 'vout' : utx2['vout']} ]
outputs = { self.nodes[0].getnewaddress() : 6.0, self.nodes[0].getnewaddress() : 1.0 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(utx['txid'], dec_tx['vin'][0]['txid'])
# Should fail without add_inputs:
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx, {"add_inputs": False})
rawtxfund = self.nodes[2].fundrawtransaction(rawtx, {"add_inputs": True})
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
totalOut = 0
matchingOuts = 0
for out in dec_tx['vout']:
totalOut += out['value']
if out['scriptPubKey']['address'] in outputs:
matchingOuts+=1
assert_equal(matchingOuts, 2)
assert_equal(len(dec_tx['vout']), 3)
def test_invalid_input(self):
self.log.info("Test fundrawtxn with an invalid vin")
inputs = [ {'txid' : "1c7f966dab21119bac53213a2bc7532bff1fa844c124fd750a7d0b1332440bd1", 'vout' : 0} ] #invalid vin!
outputs = { self.nodes[0].getnewaddress() : 1.0}
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[2].fundrawtransaction, rawtx)
def test_fee_p2pkh(self):
"""Compare fee of a standard pubkeyhash transaction."""
self.log.info("Test fundrawtxn p2pkh fee")
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_p2pkh_multi_out(self):
"""Compare fee of a standard pubkeyhash transaction with multiple outputs."""
self.log.info("Test fundrawtxn p2pkh fee with multiple outputs")
inputs = []
outputs = {
self.nodes[1].getnewaddress():1.1,
self.nodes[1].getnewaddress():1.2,
self.nodes[1].getnewaddress():0.1,
self.nodes[1].getnewaddress():1.3,
self.nodes[1].getnewaddress():0.2,
self.nodes[1].getnewaddress():0.3,
}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendmany("", outputs)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_p2sh(self):
"""Compare fee of a 2-of-2 multisig p2sh transaction."""
# Create 2-of-2 addr.
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
mSigObj = self.nodes[3].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_fee_4of5(self):
"""Compare fee of a standard pubkeyhash transaction."""
self.log.info("Test fundrawtxn fee with 4-of-5 addresses")
# Create 4-of-5 addr.
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[1].getnewaddress()
addr3 = self.nodes[1].getnewaddress()
addr4 = self.nodes[1].getnewaddress()
addr5 = self.nodes[1].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[1].getaddressinfo(addr2)
addr3Obj = self.nodes[1].getaddressinfo(addr3)
addr4Obj = self.nodes[1].getaddressinfo(addr4)
addr5Obj = self.nodes[1].getaddressinfo(addr5)
mSigObj = self.nodes[1].createmultisig(
4,
[
addr1Obj['pubkey'],
addr2Obj['pubkey'],
addr3Obj['pubkey'],
addr4Obj['pubkey'],
addr5Obj['pubkey'],
]
)['address']
inputs = []
outputs = {mSigObj:1.1}
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[0].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[0].sendtoaddress(mSigObj, 1.1)
signedFee = self.nodes[0].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance
def test_spend_2of2(self):
"""Spend a 2-of-2 multisig transaction over fundraw."""
self.log.info("Test fundpsbt spending 2-of-2 multisig")
# Create 2-of-2 addr.
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[2].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[2].get_wallet_rpc('wmulti')
w2 = self.nodes[2].get_wallet_rpc(self.default_wallet_name)
mSigObj = wmulti.addmultisigaddress(
2,
[
addr1Obj['pubkey'],
addr2Obj['pubkey'],
]
)['address']
if not self.options.descriptors:
wmulti.importaddress(mSigObj)
# Send 1.2 BTC to msig addr.
self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.nodes[0].generate(1)
self.sync_all()
oldBalance = self.nodes[1].getbalance()
inputs = []
outputs = {self.nodes[1].getnewaddress():1.1}
funded_psbt = wmulti.walletcreatefundedpsbt(inputs=inputs, outputs=outputs, options={'changeAddress': w2.getrawchangeaddress()})['psbt']
signed_psbt = w2.walletprocesspsbt(funded_psbt)
final_psbt = w2.finalizepsbt(signed_psbt['psbt'])
self.nodes[2].sendrawtransaction(final_psbt['hex'])
self.nodes[2].generate(1)
self.sync_all()
# Make sure funds are received at node1.
assert_equal(oldBalance+Decimal('1.10000000'), self.nodes[1].getbalance())
wmulti.unloadwallet()
def test_locked_wallet(self):
self.log.info("Test fundrawtxn with locked wallet and hardened derivation")
self.nodes[1].encryptwallet("test")
if self.options.descriptors:
self.nodes[1].walletpassphrase('test', 10)
self.nodes[1].importdescriptors([{
'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPdYeeZbPSKd2KYLmeVKtcFA7kqCxDvDR13MQ6us8HopUR2wLcS2ZKPhLyKsqpDL2FtL73LMHcgoCL7DXsciA8eX8nbjCR2eG/0h/*h)'),
'timestamp': 'now',
'active': True
},
{
'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPdYeeZbPSKd2KYLmeVKtcFA7kqCxDvDR13MQ6us8HopUR2wLcS2ZKPhLyKsqpDL2FtL73LMHcgoCL7DXsciA8eX8nbjCR2eG/1h/*h)'),
'timestamp': 'now',
'active': True,
'internal': True
}])
self.nodes[1].walletlock()
# Drain the keypool.
self.nodes[1].getnewaddress()
self.nodes[1].getrawchangeaddress()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.09999500}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
# fund a transaction that does not require a new key for the change output
self.nodes[1].fundrawtransaction(rawtx)
# fund a transaction that requires a new key for the change output
# creating the key must be impossible because the wallet is locked
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it. Please call keypoolrefill first.", self.nodes[1].fundrawtransaction, rawtx)
# Refill the keypool.
self.nodes[1].walletpassphrase("test", 100)
self.nodes[1].keypoolrefill(8) #need to refill the keypool to get an internal change address
self.nodes[1].walletlock()
assert_raises_rpc_error(-13, "walletpassphrase", self.nodes[1].sendtoaddress, self.nodes[0].getnewaddress(), 1.2)
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():1.1}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
# Now we need to unlock.
self.nodes[1].walletpassphrase("test", 600)
signedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
self.nodes[1].sendrawtransaction(signedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
# Make sure funds are received at node0.
assert_equal(oldBalance+Decimal('51.10000000'), self.nodes[0].getbalance())
def test_many_inputs_fee(self):
"""Multiple (~19) inputs tx test | Compare fee."""
self.log.info("Test fundrawtxn fee with many inputs")
# Empty node1, send some small coins from node0 to node1.
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.nodes[1].generate(1)
self.sync_all()
for _ in range(20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
# Fund a tx with ~20 small inputs.
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
# Create same transaction over sendtoaddress.
txId = self.nodes[1].sendmany("", outputs)
signedFee = self.nodes[1].getrawmempool(True)[txId]['fee']
# Compare fee.
feeDelta = Decimal(fundedTx['fee']) - Decimal(signedFee)
assert feeDelta >= 0 and feeDelta <= self.fee_tolerance * 19 #~19 inputs
def test_many_inputs_send(self):
"""Multiple (~19) inputs tx test | sign/send."""
self.log.info("Test fundrawtxn sign+send with many inputs")
# Again, empty node1, send some small coins from node0 to node1.
self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), self.nodes[1].getbalance(), "", "", True)
self.nodes[1].generate(1)
self.sync_all()
for _ in range(20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
# Fund a tx with ~20 small inputs.
oldBalance = self.nodes[0].getbalance()
inputs = []
outputs = {self.nodes[0].getnewaddress():0.15,self.nodes[0].getnewaddress():0.04}
rawtx = self.nodes[1].createrawtransaction(inputs, outputs)
fundedTx = self.nodes[1].fundrawtransaction(rawtx)
fundedAndSignedTx = self.nodes[1].signrawtransactionwithwallet(fundedTx['hex'])
self.nodes[1].sendrawtransaction(fundedAndSignedTx['hex'])
self.nodes[1].generate(1)
self.sync_all()
assert_equal(oldBalance+Decimal('50.19000000'), self.nodes[0].getbalance()) #0.19+block reward
def test_op_return(self):
self.log.info("Test fundrawtxn with OP_RETURN and no vin")
rawtx = "0100000000010000000000000000066a047465737400000000"
dec_tx = self.nodes[2].decoderawtransaction(rawtx)
assert_equal(len(dec_tx['vin']), 0)
assert_equal(len(dec_tx['vout']), 1)
rawtxfund = self.nodes[2].fundrawtransaction(rawtx)
dec_tx = self.nodes[2].decoderawtransaction(rawtxfund['hex'])
assert_greater_than(len(dec_tx['vin']), 0) # at least one vin
assert_equal(len(dec_tx['vout']), 2) # one change output added
def test_watchonly(self):
self.log.info("Test fundrawtxn using only watchonly")
inputs = []
outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount / 2}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
self.nodes[3].loadwallet('wwatch')
wwatch = self.nodes[3].get_wallet_rpc('wwatch')
# Setup change addresses for the watchonly wallet
desc_import = [{
"desc": descsum_create("wpkh(tpubD6NzVbkrYhZ4YNXVQbNhMK1WqguFsUXceaVJKbmno2aZ3B6QfbMeraaYvnBSGpV3vxLyTTK9DYT1yoEck4XUScMzXoQ2U2oSmE2JyMedq3H/1/*)"),
"timestamp": "now",
"internal": True,
"active": True,
"keypool": True,
"range": [0, 100],
"watchonly": True,
}]
if self.options.descriptors:
wwatch.importdescriptors(desc_import)
else:
wwatch.importmulti(desc_import)
# Backward compatibility test (2nd params is includeWatching)
result = wwatch.fundrawtransaction(rawtx, True)
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert_equal(res_dec["vin"][0]["txid"], self.watchonly_txid)
assert "fee" in result.keys()
assert_greater_than(result["changepos"], -1)
wwatch.unloadwallet()
def test_all_watched_funds(self):
self.log.info("Test fundrawtxn using entirety of watched funds")
inputs = []
outputs = {self.nodes[2].getnewaddress(): self.watchonly_amount}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
self.nodes[3].loadwallet('wwatch')
wwatch = self.nodes[3].get_wallet_rpc('wwatch')
w3 = self.nodes[3].get_wallet_rpc(self.default_wallet_name)
result = wwatch.fundrawtransaction(rawtx, {'includeWatching': True, 'changeAddress': w3.getrawchangeaddress(), 'subtractFeeFromOutputs': [0]})
res_dec = self.nodes[0].decoderawtransaction(result["hex"])
assert_equal(len(res_dec["vin"]), 1)
assert res_dec["vin"][0]["txid"] == self.watchonly_txid
assert_greater_than(result["fee"], 0)
assert_equal(result["changepos"], -1)
assert_equal(result["fee"] + res_dec["vout"][0]["value"], self.watchonly_amount)
signedtx = wwatch.signrawtransactionwithwallet(result["hex"])
assert not signedtx["complete"]
signedtx = self.nodes[0].signrawtransactionwithwallet(signedtx["hex"])
assert signedtx["complete"]
self.nodes[0].sendrawtransaction(signedtx["hex"])
self.nodes[0].generate(1)
self.sync_all()
wwatch.unloadwallet()
def test_option_feerate(self):
self.log.info("Test fundrawtxn with explicit fee rates (fee_rate sat/vB and feeRate BTC/kvB)")
node = self.nodes[3]
# Make sure there is exactly one input so coin selection can't skew the result.
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {node.getnewaddress() : 1}
rawtx = node.createrawtransaction(inputs, outputs)
result = node.fundrawtransaction(rawtx) # uses self.min_relay_tx_fee (set by settxfee)
btc_kvb_to_sat_vb = 100000 # (1e5)
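# Conversion: 1 BTC = 1e8 sat and 1 kvB = 1e3 vB, so a rate of
# 1 BTC/kvB equals 1e8 sat / 1e3 vB = 1e5 sat/vB; multiplying a BTC/kvB
# rate by 100000 therefore yields the equivalent fee_rate in sat/vB.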
result1 = node.fundrawtransaction(rawtx, {"fee_rate": str(2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee)})
result2 = node.fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee})
result3 = node.fundrawtransaction(rawtx, {"fee_rate": 10 * btc_kvb_to_sat_vb * self.min_relay_tx_fee})
result4 = node.fundrawtransaction(rawtx, {"feeRate": str(10 * self.min_relay_tx_fee)})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
assert_fee_amount(result1['fee'], count_bytes(result1['hex']), 2 * result_fee_rate)
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
assert_fee_amount(result4['fee'], count_bytes(result4['hex']), 10 * result_fee_rate)
# Test that funding non-standard "zero-fee" transactions is valid.
for param, zero_value in product(["fee_rate", "feeRate"], [0, 0.000, 0.00000000, "0", "0.000", "0.00000000"]):
assert_equal(self.nodes[3].fundrawtransaction(rawtx, {param: zero_value})["fee"], 0)
# With no arguments passed, expect fee of 141 satoshis.
assert_approx(node.fundrawtransaction(rawtx)["fee"], vexp=0.00000141, vspan=0.00000001)
# Expect fee to be 10,000x higher when an explicit fee rate 10,000x greater is specified.
result = node.fundrawtransaction(rawtx, {"fee_rate": 10000})
assert_approx(result["fee"], vexp=0.0141, vspan=0.0001)
self.log.info("Test fundrawtxn with invalid estimate_mode settings")
for k, v in {"number": 42, "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type string for estimate_mode, got {}".format(k),
node.fundrawtransaction, rawtx, {"estimate_mode": v, "conf_target": 0.1, "add_inputs": True})
for mode in ["", "foo", Decimal("3.141592")]:
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
node.fundrawtransaction, rawtx, {"estimate_mode": mode, "conf_target": 0.1, "add_inputs": True})
self.log.info("Test fundrawtxn with invalid conf_target settings")
for mode in ["unset", "economical", "conservative"]:
self.log.debug("{}".format(mode))
for k, v in {"string": "", "object": {"foo": "bar"}}.items():
assert_raises_rpc_error(-3, "Expected type number for conf_target, got {}".format(k),
node.fundrawtransaction, rawtx, {"estimate_mode": mode, "conf_target": v, "add_inputs": True})
for n in [-1, 0, 1009]:
assert_raises_rpc_error(-8, "Invalid conf_target, must be between 1 and 1008", # max value of 1008 per src/policy/fees.h
node.fundrawtransaction, rawtx, {"estimate_mode": mode, "conf_target": n, "add_inputs": True})
self.log.info("Test invalid fee rate settings")
for param, value in {("fee_rate", 100000), ("feeRate", 1.000)}:
assert_raises_rpc_error(-4, "Fee exceeds maximum configured by user (e.g. -maxtxfee, maxfeerate)",
node.fundrawtransaction, rawtx, {param: value, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount out of range",
node.fundrawtransaction, rawtx, {param: -1, "add_inputs": True})
assert_raises_rpc_error(-3, "Amount is not a number or string",
node.fundrawtransaction, rawtx, {param: {"foo": "bar"}, "add_inputs": True})
# Test fee rate values that don't pass fixed-point parsing checks.
for invalid_value in ["", 0.000000001, 1e-09, 1.111111111, 1111111111111111, "31.999999999999999999999"]:
assert_raises_rpc_error(-3, "Invalid amount", node.fundrawtransaction, rawtx, {param: invalid_value, "add_inputs": True})
# Test fee_rate values that cannot be represented in sat/vB.
for invalid_value in [0.0001, 0.00000001, 0.00099999, 31.99999999, "0.0001", "0.00000001", "0.00099999", "31.99999999"]:
assert_raises_rpc_error(-3, "Invalid amount",
node.fundrawtransaction, rawtx, {"fee_rate": invalid_value, "add_inputs": True})
self.log.info("Test min fee rate checks are bypassed with fundrawtxn, e.g. a fee_rate under 1 sat/vB is allowed")
node.fundrawtransaction(rawtx, {"fee_rate": 0.999, "add_inputs": True})
node.fundrawtransaction(rawtx, {"feeRate": 0.00000999, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and fee_rate are passed")
assert_raises_rpc_error(-8, "Cannot specify both fee_rate (sat/vB) and feeRate (BTC/kvB)",
node.fundrawtransaction, rawtx, {"fee_rate": 0.1, "feeRate": 0.1, "add_inputs": True})
self.log.info("- raises RPC error if both feeRate and estimate_mode passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and feeRate",
node.fundrawtransaction, rawtx, {"estimate_mode": "economical", "feeRate": 0.1, "add_inputs": True})
for param in ["feeRate", "fee_rate"]:
self.log.info("- raises RPC error if both {} and conf_target are passed".format(param))
assert_raises_rpc_error(-8, "Cannot specify both conf_target and {}. Please provide either a confirmation "
"target in blocks for automatic fee estimation, or an explicit fee rate.".format(param),
node.fundrawtransaction, rawtx, {param: 1, "conf_target": 1, "add_inputs": True})
self.log.info("- raises RPC error if both fee_rate and estimate_mode are passed")
assert_raises_rpc_error(-8, "Cannot specify both estimate_mode and fee_rate",
node.fundrawtransaction, rawtx, {"fee_rate": 1, "estimate_mode": "economical", "add_inputs": True})
def test_address_reuse(self):
"""Test no address reuse occurs."""
self.log.info("Test fundrawtxn does not reuse addresses")
rawtx = self.nodes[3].createrawtransaction(inputs=[], outputs={self.nodes[3].getnewaddress(): 1})
result3 = self.nodes[3].fundrawtransaction(rawtx)
res_dec = self.nodes[0].decoderawtransaction(result3["hex"])
changeaddress = ""
for out in res_dec['vout']:
if out['value'] > 1.0:
changeaddress += out['scriptPubKey']['address']
assert changeaddress != ""
nextaddr = self.nodes[3].getnewaddress()
# Now the change address key should be removed from the keypool.
assert changeaddress != nextaddr
def test_option_subtract_fee_from_outputs(self):
self.log.info("Test fundrawtxn subtractFeeFromOutputs option")
# Make sure there is exactly one input so coin selection can't skew the result.
assert_equal(len(self.nodes[3].listunspent(1)), 1)
inputs = []
outputs = {self.nodes[2].getnewaddress(): 1}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
# Test subtract fee from outputs with feeRate (BTC/kvB)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses self.min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses self.min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee, "subtractFeeFromOutputs": [0]}),]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
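# Each funded tx has exactly two outputs (payment and change), so with
# changepos in {0, 1} the expression 1 - changepos indexes the payment
# output in each decoded transaction.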
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
# Test subtract fee from outputs with fee_rate (sat/vB)
btc_kvb_to_sat_vb = 100000 # (1e5)
result = [self.nodes[3].fundrawtransaction(rawtx), # uses self.min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": []}), # empty subtraction list
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0]}), # uses self.min_relay_tx_fee (set by settxfee)
self.nodes[3].fundrawtransaction(rawtx, {"fee_rate": 2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee}),
self.nodes[3].fundrawtransaction(rawtx, {"fee_rate": 2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee, "subtractFeeFromOutputs": [0]}),]
dec_tx = [self.nodes[3].decoderawtransaction(tx_['hex']) for tx_ in result]
output = [d['vout'][1 - r['changepos']]['value'] for d, r in zip(dec_tx, result)]
change = [d['vout'][r['changepos']]['value'] for d, r in zip(dec_tx, result)]
assert_equal(result[0]['fee'], result[1]['fee'], result[2]['fee'])
assert_equal(result[3]['fee'], result[4]['fee'])
assert_equal(change[0], change[1])
assert_equal(output[0], output[1])
assert_equal(output[0], output[2] + result[2]['fee'])
assert_equal(change[0] + result[0]['fee'], change[2])
assert_equal(output[3], output[4] + result[4]['fee'])
assert_equal(change[3] + result[3]['fee'], change[4])
inputs = []
outputs = {self.nodes[2].getnewaddress(): value for value in (1.0, 1.1, 1.2, 1.3)}
rawtx = self.nodes[3].createrawtransaction(inputs, outputs)
result = [self.nodes[3].fundrawtransaction(rawtx),
# Split the fee between outputs 0, 2, and 3, but not output 1.
self.nodes[3].fundrawtransaction(rawtx, {"subtractFeeFromOutputs": [0, 2, 3]})]
dec_tx = [self.nodes[3].decoderawtransaction(result[0]['hex']),
self.nodes[3].decoderawtransaction(result[1]['hex'])]
# Nested list of non-change output amounts for each transaction.
output = [[out['value'] for i, out in enumerate(d['vout']) if i != r['changepos']]
for d, r in zip(dec_tx, result)]
# List of differences in output amounts between normal and subtractFee transactions.
share = [o0 - o1 for o0, o1 in zip(output[0], output[1])]
# Output 1 is the same in both transactions.
assert_equal(share[1], 0)
# The other 3 outputs are smaller as a result of subtractFeeFromOutputs.
assert_greater_than(share[0], 0)
assert_greater_than(share[2], 0)
assert_greater_than(share[3], 0)
# Outputs 2 and 3 take the same share of the fee.
assert_equal(share[2], share[3])
# Output 0 takes at least as much share of the fee, and no more than 2
# satoshis more, than outputs 2 and 3.
assert_greater_than_or_equal(share[0], share[2])
assert_greater_than_or_equal(share[2] + Decimal(2e-8), share[0])
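# Illustrative arithmetic for that bound (assuming the fee remainder is
# assigned to the first subtract-fee output): a fee of F satoshis split
# over 3 outputs charges each F // 3, with output 0 also absorbing the
# remainder F % 3, which is at most 2 satoshis (2e-8 BTC).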
# The fee is the same in both transactions.
assert_equal(result[0]['fee'], result[1]['fee'])
# The total subtracted from the outputs is equal to the fee.
assert_equal(share[0] + share[2] + share[3], result[0]['fee'])
def test_subtract_fee_with_presets(self):
self.log.info("Test fundrawtxn subtract fee from outputs with preset inputs that are sufficient")
addr = self.nodes[0].getnewaddress()
txid = self.nodes[0].sendtoaddress(addr, 10)
vout = find_vout_for_address(self.nodes[0], txid, addr)
rawtx = self.nodes[0].createrawtransaction([{'txid': txid, 'vout': vout}], [{self.nodes[0].getnewaddress(): 5}])
fundedtx = self.nodes[0].fundrawtransaction(rawtx, {'subtractFeeFromOutputs': [0]})
signedtx = self.nodes[0].signrawtransactionwithwallet(fundedtx['hex'])
self.nodes[0].sendrawtransaction(signedtx['hex'])
def test_transaction_too_large(self):
self.log.info("Test fundrawtx where BnB solution would result in a too large transaction, but Knapsack would not")
self.nodes[0].createwallet("large")
wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
recipient = self.nodes[0].get_wallet_rpc("large")
outputs = {}
rawtx = recipient.createrawtransaction([], {wallet.getnewaddress(): 147.99899260})
# Make 1500 0.1 BTC outputs. The amount that we target for funding is in
# the BnB range when these outputs are used. However if these outputs
# are selected, the transaction will end up being too large, so it
# shouldn't use BnB and should instead fall back to Knapsack, but that
# behavior is not implemented yet. For now we just check that we get an error.
for _ in range(1500):
outputs[recipient.getnewaddress()] = 0.1
wallet.sendmany("", outputs)
self.nodes[0].generate(10)
assert_raises_rpc_error(-4, "Transaction too large", recipient.fundrawtransaction, rawtx)
def test_include_unsafe(self):
self.log.info("Test fundrawtxn with unsafe inputs")
self.nodes[0].createwallet("unsafe")
wallet = self.nodes[0].get_wallet_rpc("unsafe")
# We receive unconfirmed funds from external keys (unsafe outputs).
addr = wallet.getnewaddress()
txid1 = self.nodes[2].sendtoaddress(addr, 6)
txid2 = self.nodes[2].sendtoaddress(addr, 4)
self.sync_all()
vout1 = find_vout_for_address(wallet, txid1, addr)
vout2 = find_vout_for_address(wallet, txid2, addr)
# Unsafe inputs are ignored by default.
rawtx = wallet.createrawtransaction([], [{self.nodes[2].getnewaddress(): 5}])
assert_raises_rpc_error(-4, "Insufficient funds", wallet.fundrawtransaction, rawtx)
# But we can opt-in to use them for funding.
fundedtx = wallet.fundrawtransaction(rawtx, {"include_unsafe": True})
tx_dec = wallet.decoderawtransaction(fundedtx['hex'])
assert any([txin['txid'] == txid1 and txin['vout'] == vout1 for txin in tx_dec['vin']])
signedtx = wallet.signrawtransactionwithwallet(fundedtx['hex'])
wallet.sendrawtransaction(signedtx['hex'])
# And we can also use them once they're confirmed.
self.nodes[0].generate(1)
rawtx = wallet.createrawtransaction([], [{self.nodes[2].getnewaddress(): 3}])
fundedtx = wallet.fundrawtransaction(rawtx, {"include_unsafe": True})
tx_dec = wallet.decoderawtransaction(fundedtx['hex'])
assert any([txin['txid'] == txid2 and txin['vout'] == vout2 for txin in tx_dec['vin']])
signedtx = wallet.signrawtransactionwithwallet(fundedtx['hex'])
wallet.sendrawtransaction(signedtx['hex'])
if __name__ == '__main__':
RawTransactionsTest().main()
from django.contrib.contenttypes.generic import GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.fields.related import ManyToManyRel, RelatedField
from django.db.models.related import RelatedObject
from django.db.models.fields.related import add_lazy_relation
from django.utils.translation import ugettext_lazy as _
from taggit.forms import TagField
from taggit.models import TaggedItem, GenericTaggedItemBase
from taggit.utils import require_instance_manager
try:
all
except NameError:
# 2.4 compat
try:
from django.utils.itercompat import all
except ImportError:
# 1.1.X compat
def all(iterable):
for item in iterable:
if not item:
return False
return True
class TaggableRel(ManyToManyRel):
def __init__(self):
self.related_name = None
self.limit_choices_to = {}
self.symmetrical = True
self.multiple = True
self.through = None
class TaggableManager(RelatedField):
def __init__(self, verbose_name=_("Tags"),
help_text=_("A comma-separated list of tags."), through=None, blank=False):
self.through = through or TaggedItem
self.rel = TaggableRel()
self.verbose_name = verbose_name
self.help_text = help_text
self.blank = blank
self.editable = True
self.unique = False
self.creates_table = False
self.db_column = None
self.choices = None
self.serialize = False
self.null = True
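# Borrow Field's creation counter so this manager sorts alongside real
# fields in Model._meta (it is not a Field subclass itself).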
self.creation_counter = models.Field.creation_counter
models.Field.creation_counter += 1
def __get__(self, instance, model):
if instance is not None and instance.pk is None:
raise ValueError("%s objects need to have a primary key value "
"before you can access their tags." % model.__name__)
manager = _TaggableManager(
through=self.through, model=model, instance=instance
)
return manager
def contribute_to_class(self, cls, name):
self.name = self.column = name
self.model = cls
cls._meta.add_field(self)
setattr(cls, name, self)
if not cls._meta.abstract:
if isinstance(self.through, basestring):
def resolve_related_class(field, model, cls):
self.through = model
self.post_through_setup(cls)
add_lazy_relation(
cls, self, self.through, resolve_related_class
)
else:
self.post_through_setup(cls)
def post_through_setup(self, cls):
self.use_gfk = (
self.through is None or issubclass(self.through, GenericTaggedItemBase)
)
self.rel.to = self.through._meta.get_field("tag").rel.to
if self.use_gfk:
tagged_items = GenericRelation(self.through)
tagged_items.contribute_to_class(cls, "tagged_items")
def save_form_data(self, instance, value):
getattr(instance, self.name).set(*value)
def formfield(self, form_class=TagField, **kwargs):
defaults = {
"label": self.verbose_name,
"help_text": self.help_text,
"required": not self.blank
}
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, instance):
if instance.pk:
return self.through.objects.filter(**self.through.lookup_kwargs(instance))
return self.through.objects.none()
def related_query_name(self):
return self.model._meta.module_name
def m2m_target_field_name(self):
return self.model._meta.pk.name
def m2m_reverse_target_field_name(self):
return self.rel.to._meta.pk.name
def m2m_reverse_name(self):
return self.through._meta.get_field_by_name("tag")[0].column
def m2m_column_name(self):
if self.use_gfk:
return self.through._meta.virtual_fields[0].fk_field
return self.through._meta.get_field('content_object').column
def db_type(self, connection=None):
return None
def m2m_db_table(self):
return self.through._meta.db_table
def extra_filters(self, pieces, pos, negate):
if negate or not self.use_gfk:
return []
prefix = "__".join(["tagged_items"] + pieces[:pos-2])
cts = map(ContentType.objects.get_for_model, _get_subclasses(self.model))
if len(cts) == 1:
return [("%s__content_type" % prefix, cts[0])]
return [("%s__content_type__in" % prefix, cts)]
def bulk_related_objects(self, new_objs, using):
return []
class _TaggableManager(models.Manager):
def __init__(self, through, model, instance):
self.through = through
self.model = model
self.instance = instance
def get_query_set(self):
return self.through.tags_for(self.model, self.instance)
def _lookup_kwargs(self):
return self.through.lookup_kwargs(self.instance)
@require_instance_manager
def add(self, *tags):
str_tags = set([
t
for t in tags
if not isinstance(t, self.through.tag_model())
])
tag_objs = set(tags) - str_tags
# If str_tags has 0 elements Django actually optimizes that to not do a
# query. Malcolm is very smart.
existing = self.through.tag_model().objects.filter(
name__in=str_tags
)
tag_objs.update(existing)
existing_names = set(t.name for t in existing)
existing_names_lower = set(t.name.lower() for t in existing)
for new_name in str_tags - existing_names:
if new_name.lower() not in existing_names_lower:
tag_objs.add(self.through.tag_model().objects.create(name=new_name))
for tag in tag_objs:
self.through.objects.get_or_create(tag=tag, **self._lookup_kwargs())
@require_instance_manager
def set(self, *tags):
self.clear()
self.add(*tags)
@require_instance_manager
def remove(self, *tags):
self.through.objects.filter(**self._lookup_kwargs()).filter(
tag__name__in=tags).delete()
@require_instance_manager
def clear(self):
self.through.objects.filter(**self._lookup_kwargs()).delete()
def most_common(self):
return self.get_query_set().annotate(
num_times=models.Count(self.through.tag_relname())
).order_by('-num_times')
@require_instance_manager
def similar_objects(self, num=None, **filters):
lookup_kwargs = self._lookup_kwargs()
lookup_keys = sorted(lookup_kwargs)
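# Rank other tagged objects by the number of tags they share with this
# instance: group through-model rows by object, count the rows whose tag
# is in our tag set, and exclude the instance itself.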
qs = self.through.objects.values(*lookup_kwargs.keys())
qs = qs.annotate(n=models.Count('pk'))
qs = qs.exclude(**lookup_kwargs)
subq = self.all()
qs = qs.filter(tag__in=list(subq))
qs = qs.order_by('-n')
if filters:
qs = qs.filter(**filters)
if num is not None:
qs = qs[:num]
# TODO: This all feels like a bit of a hack.
items = {}
if len(lookup_keys) == 1:
# Can we do this without a second query by using a select_related()
# somehow?
f = self.through._meta.get_field_by_name(lookup_keys[0])[0]
objs = f.rel.to._default_manager.filter(**{
"%s__in" % f.rel.field_name: [r["content_object"] for r in qs]
})
for obj in objs:
items[(getattr(obj, f.rel.field_name),)] = obj
else:
preload = {}
for result in qs:
preload.setdefault(result['content_type'], set())
preload[result["content_type"]].add(result["object_id"])
for ct, obj_ids in preload.iteritems():
ct = ContentType.objects.get_for_id(ct)
for obj in ct.model_class()._default_manager.filter(pk__in=obj_ids):
items[(ct.pk, obj.pk)] = obj
results = []
for result in qs:
obj = items[
tuple(result[k] for k in lookup_keys)
]
obj.similar_tags = result["n"]
results.append(obj)
return results
def _get_subclasses(model):
subclasses = [model]
for f in model._meta.get_all_field_names():
field = model._meta.get_field_by_name(f)[0]
if (isinstance(field, RelatedObject) and
getattr(field.field.rel, "parent_link", None)):
subclasses.extend(_get_subclasses(field.model))
return subclasses
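# Minimal usage sketch (hypothetical model, not part of this module),
# showing the _TaggableManager API defined above:
#
#     from django.db import models
#     from taggit.managers import TaggableManager
#
#     class Food(models.Model):
#         name = models.CharField(max_length=50)
#         tags = TaggableManager()
#
#     apple = Food.objects.create(name="apple")
#     apple.tags.add("red", "green", "fruit")  # get_or_create per tag
#     apple.tags.set("red")                    # clear(), then add()
#     similar = apple.tags.similar_objects()   # ranked by shared tags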
# -*- coding: utf-8 -*-
# Copyright 2016 Mirantis, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import mock
import mockfs
import os
import pytest
import sys
import jsonschema
from jimmy import cli
from mock import call
from click.testing import CliRunner
from jimmy.lib.common import yaml_reader
from jimmy.tests import base
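# Path layout assumed by the fixtures below: modules_dir resolves three
# directory levels up from this test file to the modules root, and
# jimmy_dir is its parent, where jimmy.yaml and sample/input live.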
modules_dir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
jimmy_dir = os.path.dirname(modules_dir)
credentials_schema_path = os.path.join(modules_dir, 'credentials', 'resources', 'schema.yaml')
jenkins_yaml_path = os.path.join(jimmy_dir, 'sample', 'input', 'jenkins.yaml')
class TestCredentialsModule(base.TestCase):
def setup_method(self, method):
self.runner = CliRunner()
def teardown_method(self, method):
mockfs.restore_builtins()
@mock.patch('jimmy.lib.core.load_py_modules')
@mock.patch('subprocess.call')
def test_cli_call(self, mock_subp, mock_modules):
with open(credentials_schema_path, 'r') as f:
mock_credentials_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({os.path.join(jimmy_dir, 'lib', 'schema.yaml'): self.jimmy_schema,
os.path.join(jimmy_dir, 'jimmy.yaml'): self.mock_jimmy_yaml,
credentials_schema_path: mock_credentials_schema,
jenkins_yaml_path: '\n'.join(
[
'jenkins:',
' credentials:',
' password:',
' - scope: global',
' username: user',
' password: passwd',
' description: test username/password user',
' ssh:',
' - scope: global',
' username: user2',
' private_key: /home/user/.ssh/id_rsa',
' id: this-is-an-id',
' file:',
' - scope: global',
' id: secret-key',
' file: /home/user/secret_key',
' description: Secret key',
' kubernetes:',
' - id: kubernetes-credentials',
' scope: global',
' description: kubernetes.example.com service creds',
' token:',
' - scope: global',
' username: user',
' id: user-token',
' description: test token credentials'
])
})
sys.path.insert(0, modules_dir)
import credentials
import read_source
sys.path.pop(0)
mock_modules.return_value = [credentials, read_source]
os.chdir(jimmy_dir)
self.runner.invoke(cli)
calls = [call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'credentials/resources/jenkins.groovy',
'updateCredentials',
"'global'",
"'user'",
"'passwd'",
"'test username/password user'",
"''",
"''",
"''"],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'credentials/resources/jenkins.groovy',
'updateCredentials',
"'global'",
"'user2'",
"''",
"''",
"'/home/user/.ssh/id_rsa'",
"''",
"'this-is-an-id'"],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'credentials/resources/jenkins.groovy',
'updateCredentials',
"'global'",
"''",
"''",
"'Secret key'",
"''",
"'/home/user/secret_key'",
"'secret-key'"],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'credentials/resources/kubernetes.groovy',
'updateCredentials',
'global',
'kubernetes-credentials',
'kubernetes.example.com service creds'],
shell=False),
call(['java',
'-jar', '<< path to jenkins-cli.jar >>',
'-s', 'http://localhost:8080', 'groovy',
modules_dir + '/' + 'credentials/resources/jenkins.groovy',
'updateCredentials',
"'global'",
"'user'",
"''",
"'test token credentials'",
"''",
"''",
"'user-token'"],
shell=False)]
mock_subp.assert_has_calls(calls, any_order=True)
assert 5 == mock_subp.call_count, "subprocess should be called exactly 5 times"
class TestCredentialsSchema(object):
def setup_method(self, method):
with open(credentials_schema_path, 'r') as f:
mock_credentials_schema = f.read()
self.mfs = mockfs.replace_builtins()
self.mfs.add_entries({credentials_schema_path: mock_credentials_schema})
self.schema = yaml_reader.read(credentials_schema_path)
def teardown_method(self, method):
mockfs.restore_builtins()
def test_valid_repo_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: global',
' username: user',
' password: passwd',
' description: test username/password user',
' id: this-is-credentials-id',
' ssh:',
' - scope: global',
' username: user2',
' private_key: /home/user/.ssh/id_rsa'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_valid_oneof_password_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: global',
' username: user',
' password: passwd',
' description: test username/password user',
' id: this-is-credentials-id'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_valid_oneof_ssh_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' ssh:',
' - scope: global',
' username: user2',
' private_key: /home/user/.ssh/id_rsa'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_valid_oneof_file_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' file:',
' - scope: global',
' id: secret-key',
' file: /home/user/secret_key',
' description: Secret key'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_valid_oneof_token_data(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' token:',
' - scope: global',
' username: user',
' description: test token credentials',
' id: this-is-token-credentials-id'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
jsonschema.validate(repo_data, self.schema)
def test_password_validation_fail_if_scope_is_not_enum(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: test',
' username: user',
' password: passwd',
' description: test username/password user'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not one of ['global', 'system']"
def test_ssh_validation_fail_if_scope_is_not_enum(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' ssh:',
' - scope: test',
' username: user2',
' private_key: /home/user/.ssh/id_rsa',
' id: this-is-credentials-id'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not one of ['global', 'system']"
def test_file_validation_fail_if_scope_is_not_enum(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' file:',
' - scope: test',
' id: secret-key',
' file: /home/user/secret_key',
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not one of ['global', 'system']"
def test_token_validation_fail_if_scope_is_not_enum(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' token:',
' - scope: test',
' username: user',
' description: test token credentials',
' id: this-is-token-credentials-id'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'test' is not one of ['global', 'system']"
def test_validation_fail_if_username_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: global',
' username: 123',
' password: passwd',
' description: test username/password user',
' id: this-is-credentials-id'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_id_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: global',
' username: user',
' password: passwd',
' description: test username/password user',
' id: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_password_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: global',
' username: user',
' password: 123',
' description: test username/password user'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_descr_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: global',
' username: user',
' password: passwd',
' description: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_passphrase_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' ssh:',
' - scope: system',
' username: user2',
' passphrase: 123',
' private_key: /home/user/.ssh/id_rsa'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_private_key_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' ssh:',
' - scope: system',
' username: user2',
' passphrase: psprs',
' private_key: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_validation_fail_if_file_is_not_string(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' file:',
' - scope: global',
' id: secret-key',
' file: 123',
' description: Secret key'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'string'"
def test_password_validation_fail_for_scope_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - username: user',
' password: passwd',
' description: test username/password user'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'scope' is a required property"
def test_password_validation_fail_for_username_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: global',
' password: passwd',
' description: test username/password user'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'username' is a required property"
def test_password_validation_fail_for_password_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: global',
' username: user',
' description: test username/password user'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'password' is a required property"
def test_ssh_validation_fail_for_scope_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' ssh:',
' - username: user2',
' private_key: /home/user/.ssh/id_rsa'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'scope' is a required property"
def test_ssh_validation_fail_for_username_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' ssh:',
' - scope: global',
' private_key: /home/user/.ssh/id_rsa'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'username' is a required property"
def test_ssh_validation_fail_for_private_key_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' ssh:',
' - scope: global',
' username: user2'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'private_key' is a required property"
def test_file_validation_fail_for_scope_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' file:',
' - id: secret-key',
' file: /home/user/secret_key'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'scope' is a required property"
def test_file_validation_fail_for_file_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' file:',
' - scope: global',
' id: secret-key'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'file' is a required property"
def test_token_validation_fail_for_scope_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' token:',
' - username: user',
' description: test token credentials',
' id: this-is-token-credentials-id'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'scope' is a required property"
def test_token_validation_fail_for_username_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' token:',
' - scope: global',
' description: test token credentials',
' id: this-is-token-credentials-id'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'username' is a required property"
def test_token_validation_fail_for_id_required_property(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' token:',
' - scope: global',
' username: user',
' description: test token credentials'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "'id' is a required property"
def test_validation_fail_if_password_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'password: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_if_ssh_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'ssh: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_if_file_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'file: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_if_token_not_array(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
'token: 123'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "123 is not of type 'array'"
def test_validation_fail_for_password_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' password:',
' - scope: global',
' username: user',
' password: passwd',
' description: test username/password user',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
def test_validation_fail_for_ssh_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' ssh:',
' - scope: global',
' username: user2',
' private_key: /home/user/.ssh/id_rsa',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
def test_validation_fail_for_file_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' file:',
' - scope: global',
' id: secret-key',
' file: /home/user/secret_file',
' test: test'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('test' was unexpected)"
def test_validation_fail_for_token_additional_properties(self):
self.mfs.add_entries({jenkins_yaml_path: '\n'.join(
[
' token:',
' - scope: global',
' username: user',
' password: passwd',
' description: test token credentials',
' id: this-is-token-credentials-id'
])
})
repo_data = yaml_reader.read(jenkins_yaml_path)
with pytest.raises(jsonschema.ValidationError) as excinfo:
jsonschema.validate(repo_data, self.schema)
assert excinfo.value.message == "Additional properties are not allowed ('password' was unexpected)"
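# A sketch of the schema shape these tests imply (inferred from the
# assertions above; not the actual definition behind self.schema):
#   'password': {'type': 'array', 'items': {
#       'type': 'object',
#       'required': ['scope', 'username', 'password'],
#       'additionalProperties': False}}
# with analogous entries for 'ssh', 'file' and 'token'.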
| |
#!/usr/bin/env python
#
# Project:
# glideinWMS
#
# File Version:
#
# Description:
# This is the main of the glideinFrontend
#
# Arguments:
# $1 = work_dir
#
# Author:
# Igor Sfiligoi
#
import os
import sys
import fcntl
import subprocess
import traceback
import signal
import time
import string
import logging
STARTUP_DIR = sys.path[0]
sys.path.append(os.path.join(STARTUP_DIR,"../.."))
from glideinwms.lib import logSupport
from glideinwms.lib import cleanupSupport
from glideinwms.frontend import glideinFrontendPidLib
from glideinwms.frontend import glideinFrontendConfig
from glideinwms.frontend import glideinFrontendLib
from glideinwms.frontend import glideinFrontendInterface
from glideinwms.frontend import glideinFrontendMonitorAggregator
from glideinwms.frontend import glideinFrontendMonitoring
from glideinFrontendElement import glideinFrontendElement
############################################################
# KEL remove this method and just call the monitor aggregator method directly below? we don't use the results
def aggregate_stats():
_ = glideinFrontendMonitorAggregator.aggregateStatus()
return
############################################################
class FailureCounter:
def __init__(self, my_name, max_lifetime):
self.my_name=my_name
self.max_lifetime=max_lifetime
self.failure_times=[]
def add_failure(self, when=None):
if when is None:
when = time.time()
self.clean_old()
self.failure_times.append(when)
def get_failures(self):
self.clean_old()
return self.failure_times
def count_failures(self):
return len(self.get_failures())
# INTERNAL
# clean out any old records
def clean_old(self):
min_time=time.time()-self.max_lifetime
while (len(self.failure_times)>0 and
(self.failure_times[0]<min_time)): # I am assuming they are ordered
self.failure_times.pop(0)
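# Illustrative use of FailureCounter (a sketch, not part of the original code):
#   fc = FailureCounter('mygroup', 3600)
#   fc.add_failure()       # records time.time()
#   fc.count_failures()    # -> 1 until the record ages past max_lifetime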
############################################################
def spawn_group(work_dir, group_name, action):
global STARTUP_DIR
command_list = [sys.executable,
os.path.join(STARTUP_DIR,
"glideinFrontendElement.py"),
str(os.getpid()),
work_dir,
group_name,
action]
#logSupport.log.debug("Command list: %s" % command_list)
child = subprocess.Popen(command_list, shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    # set the child's pipes to non-blocking mode so poll_group_process
    # can drain stdout/stderr without stalling
for fd in (child.stdout.fileno(),
child.stderr.fileno()):
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
return child
############################################################
def poll_group_process(group_name,child):
# empty stdout and stderr
try:
tempOut = child.stdout.read()
if len(tempOut)!=0:
logSupport.log.info("[%s]: %s" % (group_name, tempOut))
except IOError:
pass # ignore
try:
tempErr = child.stderr.read()
if len(tempErr)!=0:
logSupport.log.warning("[%s]: %s" % (group_name, tempErr))
except IOError:
pass # ignore
return child.poll()
############################################################
# return the list of (group,walltime) pairs
def spawn_iteration(work_dir, groups, max_active,
failure_dict, max_failures,
action):
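    # Each group's entry in childs walks a small state machine:
    #   'queued' -> 'spawned' -> 'finished' (exit code 0) or 'failed' (non-zero),
    # with at most max_active groups in the 'spawned' state at any time.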
childs = {}
for group_name in groups:
childs[group_name] = {'state':'queued'}
active_groups = 0
groups_tofinish = len(groups)
max_num_failures=0
logSupport.log.info("Starting iteration")
try:
while groups_tofinish>0:
done_something = False
# check if any group finished by now
for group_name in groups:
if childs[group_name]['state']=='spawned':
group_rc = poll_group_process(group_name, childs[group_name]['data'])
                    if group_rc is not None: # None means "still alive"
if group_rc==0:
childs[group_name]['state'] = 'finished'
else:
childs[group_name]['state'] = 'failed'
failure_dict[group_name].add_failure()
num_failures=failure_dict[group_name].count_failures()
max_num_failures=max(max_num_failures, num_failures)
logSupport.log.warning("Group %s terminated with exit code %i (%i recent failure)" % (group_name, group_rc, num_failures))
childs[group_name]['end_time']=time.time()
active_groups-=1
groups_tofinish-=1
done_something = True
# see if I can spawn more
for group_name in groups:
if active_groups<max_active: # can spawn more
if childs[group_name]['state']=='queued':
childs[group_name]['data'] = spawn_group(work_dir, group_name, action)
childs[group_name]['state'] = 'spawned'
childs[group_name]['start_time']=time.time()
active_groups+=1
done_something = True
else:
break
if done_something:
logSupport.log.info("Active groups = %i, Groups to finish = %i"%(active_groups,groups_tofinish))
if groups_tofinish>0:
time.sleep(0.01)
logSupport.log.info("All groups finished")
logSupport.log.info("Aggregate monitoring data")
# KEL - can we just call the monitor aggregator method directly? see above
aggregate_stats()
"""
try:
aggregate_stats()
except Exception:
logSupport.log.exception("Aggregate monitoring data .. ERROR")
"""
logSupport.log.info("Cleaning logs")
cleanupSupport.cleaners.cleanup()
if max_num_failures>max_failures:
logSupport.log.info("Too many group failures, aborting")
logSupport.log.debug("Failed %i times (limit %i), aborting"%(max_num_failures,max_failures))
raise RuntimeError, "Too many group failures, aborting"
finally:
# cleanup at exit
# if anything goes wrong, hardkill the rest
for group_name in childs.keys():
if childs[group_name]['state']=='spawned':
logSupport.log.info("Hard killing group %s" % group_name)
try:
os.kill(childs[group_name]['data'].pid,signal.SIGKILL)
except OSError:
pass # ignore failed kills of non-existent processes
# at this point, all groups should have been run
timings=[]
for group_name in groups:
timings.append((group_name,childs[group_name]['end_time']-childs[group_name]['start_time']))
return timings
############################################################
def spawn_cleanup(work_dir,groups):
global STARTUP_DIR
for group_name in groups:
try:
command_list = [sys.executable,
os.path.join(STARTUP_DIR,
"glideinFrontendElement.py"),
str(os.getpid()),
work_dir,
group_name,
"deadvertise"]
#logSupport.log.debug("Command list: %s" % command_list)
child = subprocess.Popen(command_list, shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# set it in non blocking mode
for fd in (child.stdout.fileno(),
child.stderr.fileno()):
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while poll_group_process(group_name,child) is None:
# None means "still alive"
time.sleep(0.01)
except:
# never fail on cleanup
pass
############################################################
def spawn(sleep_time, advertize_rate, work_dir, frontendDescript,
groups, max_parallel_workers, restart_interval, restart_attempts):
num_groups=len(groups)
# TODO: Get the ha_check_interval from the config
ha = glideinFrontendLib.getHASettings(frontendDescript.data)
ha_check_interval = glideinFrontendLib.getHACheckInterval(frontendDescript.data)
mode = glideinFrontendLib.getHAMode(frontendDescript.data)
master_frontend_name = ''
if mode == 'slave':
master_frontend_name = ha.get('ha_frontends')[0].get('frontend_name')
active = (mode == 'master')
hibernate = shouldHibernate(frontendDescript, work_dir, ha, mode, groups)
logSupport.log.info('Frontend started with mode = %s' % mode)
try:
# Service will exit on signal only.
# This infinite loop is for the slave to go back into hibernation
# once the master becomes alive.
# Master never loops infinitely here, but instead it does in
# the inner loop while(mode=='master') ...
while 1:
while hibernate:
# If I am slave enter hibernation cycle while Master is alive
logSupport.log.info('Master Frontend %s is online. Hibernating.' % master_frontend_name)
time.sleep(ha_check_interval)
hibernate = shouldHibernate(frontendDescript, work_dir,
ha, mode, groups)
# We broke out of hibernation cycle
# Either Master has disappeared or I am the Master
if mode == 'slave':
logSupport.log.info("Master frontend %s is offline. Activating slave frontend." % master_frontend_name)
active = True
failure_dict={}
for group in groups:
failure_dict[group]=FailureCounter(group, restart_interval)
while ((mode == 'master') or ((mode == 'slave') and active)):
start_time=time.time()
timings = spawn_iteration(work_dir, groups,
max_parallel_workers, failure_dict,
restart_attempts, "run")
end_time=time.time()
elapsed_time=end_time-start_time
if elapsed_time<sleep_time:
real_sleep_time=sleep_time-elapsed_time
logSupport.log.info("Sleep %.1f sec" % real_sleep_time)
time.sleep(real_sleep_time)
else:
logSupport.log.info("No sleeping this loop, took %.1f sec > %.1f sec" % (elapsed_time, sleep_time))
# order the groups by walltime
# longest walltime first
timings.sort(lambda x,y:-cmp(x[1],y[1]))
# recreate the groups list, with new ordering
groups=[el[0] for el in timings]
assert num_groups==len(groups), "Something went wrong, number of groups changed"
if mode == 'slave':
# If we are slave, check if master is back and if so
# deadvertise my classads and hibernate
hibernate = shouldHibernate(frontendDescript, work_dir,
ha, mode, groups)
if hibernate:
active = False
logSupport.log.info("Master frontend %s is back online" % master_frontend_name)
logSupport.log.info("Deadvertize my ads and enter hibernation cycle")
spawn_cleanup(work_dir, groups)
else:
logSupport.log.info("Master frontend %s is still offline" % master_frontend_name)
finally:
# We have been asked to terminate
logSupport.log.info("Deadvertize my ads")
spawn_cleanup(work_dir,groups)
############################################################
def shouldHibernate(frontendDescript, work_dir, ha, mode, groups):
"""
    Check if the frontend is running in HA mode. If running in master mode,
    never hibernate. If running in slave mode, hibernate while the master is active.
@rtype: bool
@return: True if we should hibernate else False
"""
if mode == 'slave':
master_frontend_name = ha.get('ha_frontends')[0].get('frontend_name')
for group in groups:
element = glideinFrontendElement(os.getpid(), work_dir,
group, "run")
os.environ['CONDOR_CONFIG'] = element.elementDescript.frontend_data['CondorConfig']
os.environ['_CONDOR_CERTIFICATE_MAPFILE'] = element.elementDescript.element_data['MapFile']
os.environ['X509_USER_PROXY'] = element.elementDescript.frontend_data['ClassAdProxy']
for factory_pool in element.factory_pools:
factory_pool_node = factory_pool[0]
master_classads = glideinFrontendInterface.findMasterFrontendClassads(factory_pool_node, master_frontend_name)
if master_classads:
# Found some classads in one of the collectors
# Cleanup the env and return True
clean_htcondor_env()
return True
# Cleanup the env
clean_htcondor_env()
return False
def clean_htcondor_env():
for v in ('CONDOR_CONFIG','_CONDOR_CERTIFICATE_MAPFILE','X509_USER_PROXY'):
if os.environ.get(v):
del os.environ[v]
############################################################
def spawn_removal(work_dir, frontendDescript, groups,
max_parallel_workers, removal_action):
failure_dict={}
for group in groups:
failure_dict[group]=FailureCounter(group, 3600)
spawn_iteration(work_dir,groups,max_parallel_workers,
failure_dict, 1, removal_action)
############################################################
def cleanup_environ():
for val in os.environ.keys():
val_low = val.lower()
if val_low[:8] == "_condor_":
# remove any CONDOR environment variables
# don't want any surprises
del os.environ[val]
elif val_low[:5] == "x509_":
# remove any X509 environment variables
# don't want any surprises
del os.environ[val]
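# Illustrative effect (sketch): with os.environ containing
#   {'_CONDOR_CONFIG': 'x', 'X509_USER_PROXY': 'y', 'HOME': '/home/user'}
# cleanup_environ() removes the first two keys and leaves HOME untouched.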
############################################################
def main(work_dir, action):
startup_time=time.time()
glideinFrontendConfig.frontendConfig.frontend_descript_file = os.path.join(work_dir, glideinFrontendConfig.frontendConfig.frontend_descript_file)
frontendDescript = glideinFrontendConfig.FrontendDescript(work_dir)
# the log dir is shared between the frontend main and the groups, so use a subdir
logSupport.log_dir = os.path.join(frontendDescript.data['LogDir'], "frontend")
# Configure frontend process logging
process_logs = eval(frontendDescript.data['ProcessLogs'])
for plog in process_logs:
logSupport.add_processlog_handler("frontend", logSupport.log_dir,
plog['msg_types'], plog['extension'],
int(float(plog['max_days'])),
int(float(plog['min_days'])),
int(float(plog['max_mbytes'])),
int(float(plog['backup_count'])),
plog['compression'])
logSupport.log = logging.getLogger("frontend")
logSupport.log.info("Logging initialized")
logSupport.log.debug("Frontend startup time: %s" % str(startup_time))
try:
cleanup_environ()
# we use a dedicated config... ignore the system-wide
os.environ['CONDOR_CONFIG'] = frontendDescript.data['CondorConfig']
sleep_time = int(frontendDescript.data['LoopDelay'])
advertize_rate = int(frontendDescript.data['AdvertiseDelay'])
max_parallel_workers = int(frontendDescript.data['GroupParallelWorkers'])
restart_attempts = int(frontendDescript.data['RestartAttempts'])
restart_interval = int(frontendDescript.data['RestartInterval'])
groups = string.split(frontendDescript.data['Groups'], ',')
groups.sort()
glideinFrontendMonitorAggregator.monitorAggregatorConfig.config_frontend(os.path.join(work_dir, "monitor"), groups)
except:
logSupport.log.exception("Exception occurred configuring monitoring: ")
raise
glideinFrontendMonitoring.write_frontend_descript_xml(frontendDescript, os.path.join(work_dir, 'monitor/'))
logSupport.log.info("Enabled groups: %s" % groups)
# create lock file
pid_obj = glideinFrontendPidLib.FrontendPidSupport(work_dir)
# start
try:
pid_obj.register(action)
except glideinFrontendPidLib.pidSupport.AlreadyRunning, err:
pid_obj.load_registered()
logSupport.log.exception("Failed starting Frontend with action %s. Instance with pid %s is aready running for action %s. Exception during pid registration: %s" %
(action, pid_obj.mypid , str(pid_obj.action_type), err))
raise
try:
try:
if action=="run":
spawn(sleep_time, advertize_rate, work_dir,
frontendDescript, groups, max_parallel_workers,
restart_interval, restart_attempts)
elif action in ('removeWait','removeIdle','removeAll','removeWaitExcess','removeIdleExcess','removeAllExcess'):
spawn_removal(work_dir, frontendDescript, groups,
max_parallel_workers, action)
else:
raise ValueError, "Unknown action: %s"%action
except KeyboardInterrupt:
logSupport.log.info("Received signal...exit")
except:
logSupport.log.exception("Exception occurred trying to spawn: ")
finally:
pid_obj.relinquish()
############################################################
#
# S T A R T U P
#
############################################################
def termsignal(signr, frame):
raise KeyboardInterrupt, "Received signal %s" % signr
if __name__ == '__main__':
signal.signal(signal.SIGTERM, termsignal)
signal.signal(signal.SIGQUIT, termsignal)
if len(sys.argv)==2:
action = "run"
else:
action = sys.argv[2]
main(sys.argv[1], action)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class CropsOperations(object):
"""CropsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.agrifood.farming.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
phenotypes=None, # type: Optional[List[str]]
ids=None, # type: Optional[List[str]]
names=None, # type: Optional[List[str]]
property_filters=None, # type: Optional[List[str]]
statuses=None, # type: Optional[List[str]]
min_created_date_time=None, # type: Optional[datetime.datetime]
max_created_date_time=None, # type: Optional[datetime.datetime]
min_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_page_size=50, # type: Optional[int]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.CropListResponse"]
"""Returns a paginated list of crop resources.
:param phenotypes: Crop phenotypes of the resource.
:type phenotypes: list[str]
:param ids: Ids of the resource.
:type ids: list[str]
:param names: Names of the resource.
:type names: list[str]
:param property_filters: Filters on key-value pairs within the Properties object.
eg. "{testKey} eq {testValue}".
:type property_filters: list[str]
:param statuses: Statuses of the resource.
:type statuses: list[str]
:param min_created_date_time: Minimum creation date of resource (inclusive).
:type min_created_date_time: ~datetime.datetime
:param max_created_date_time: Maximum creation date of resource (inclusive).
:type max_created_date_time: ~datetime.datetime
:param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
:type min_last_modified_date_time: ~datetime.datetime
:param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
:type max_last_modified_date_time: ~datetime.datetime
:param max_page_size: Maximum number of items needed (inclusive).
Minimum = 10, Maximum = 1000, Default value = 50.
:type max_page_size: int
:param skip_token: Skip token for getting next set of results.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CropListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.agrifood.farming.models.CropListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CropListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if phenotypes is not None:
query_parameters['phenotypes'] = [self._serialize.query("phenotypes", q, 'str') if q is not None else '' for q in phenotypes]
if ids is not None:
query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
if names is not None:
query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
if property_filters is not None:
query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
if statuses is not None:
query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
if min_created_date_time is not None:
query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
if max_created_date_time is not None:
query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
if min_last_modified_date_time is not None:
query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
if max_last_modified_date_time is not None:
query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('CropListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/crops'} # type: ignore
def get(
self,
crop_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.Crop"
"""Gets a specified crop resource.
:param crop_id: ID of the crop.
:type crop_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Crop, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.Crop
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Crop"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'cropId': self._serialize.url("crop_id", crop_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('Crop', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/crops/{cropId}'} # type: ignore
def create_or_update(
self,
crop_id, # type: str
crop=None, # type: Optional["_models.Crop"]
**kwargs # type: Any
):
# type: (...) -> "_models.Crop"
"""Creates or updates a crop resource.
:param crop_id: ID of the crop resource.
:type crop_id: str
:param crop: Crop resource payload to create or update.
:type crop: ~azure.agrifood.farming.models.Crop
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Crop, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.Crop
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Crop"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
content_type = kwargs.pop("content_type", "application/merge-patch+json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'cropId': self._serialize.url("crop_id", crop_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if crop is not None:
body_content = self._serialize.body(crop, 'Crop')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('Crop', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('Crop', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/crops/{cropId}'} # type: ignore
def delete(
self,
crop_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes Crop for given crop id.
:param crop_id: ID of crop to be deleted.
:type crop_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'cropId': self._serialize.url("crop_id", crop_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/crops/{cropId}'} # type: ignore
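# Illustrative client-side usage (a sketch only; FarmBeatsClient and the
# credential wiring below are assumptions about the package entry point and
# are not defined in this file):
#   from azure.identity import DefaultAzureCredential
#   from azure.agrifood.farming import FarmBeatsClient
#   client = FarmBeatsClient(endpoint="https://<resource>.farmbeats.azure.net",
#                            credential=DefaultAzureCredential())
#   client.crops.create_or_update(crop_id="corn", crop=_models.Crop())
#   for crop in client.crops.list(max_page_size=50):  # ItemPaged follows next links
#       print(crop.id)
#   client.crops.delete(crop_id="corn")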
| |
"""Pythonic command-line interface parser that will make you smile.
* http://docopt.org
* Repository and issue-tracker: https://github.com/docopt/docopt
* Licensed under terms of MIT license (see LICENSE-MIT)
* Copyright (c) 2013 Vladimir Keleshev, vladimir@keleshev.com
"""
import sys
import re
__all__ = ['docopt']
__version__ = '0.6.2'
class DocoptLanguageError(Exception):
"""Error in construction of usage-message by developer."""
class DocoptExit(SystemExit):
"""Exit in case user invoked program with incorrect arguments."""
usage = ''
def __init__(self, message=''):
SystemExit.__init__(self, (message + '\n' + self.usage).strip())
class Pattern(object):
def __eq__(self, other):
return repr(self) == repr(other)
def __hash__(self):
return hash(repr(self))
def fix(self):
self.fix_identities()
self.fix_repeating_arguments()
return self
def fix_identities(self, uniq=None):
"""Make pattern-tree tips point to same object if they are equal."""
if not hasattr(self, 'children'):
return self
uniq = list(set(self.flat())) if uniq is None else uniq
for i, c in enumerate(self.children):
if not hasattr(c, 'children'):
assert c in uniq
self.children[i] = uniq[uniq.index(c)]
else:
c.fix_identities(uniq)
def fix_repeating_arguments(self):
"""Fix elements that should accumulate/increment values."""
either = [list(c.children) for c in self.either.children]
for case in either:
for e in [c for c in case if case.count(c) > 1]:
if type(e) is Argument or type(e) is Option and e.argcount:
if e.value is None:
e.value = []
elif type(e.value) is not list:
e.value = e.value.split()
if type(e) is Command or type(e) is Option and e.argcount == 0:
e.value = 0
return self
@property
def either(self):
"""Transform pattern into an equivalent, with only top-level Either."""
# Currently the pattern will not be equivalent, but more "narrow",
# although good enough to reason about list arguments.
ret = []
groups = [[self]]
while groups:
children = groups.pop(0)
types = [type(c) for c in children]
if Either in types:
either = [c for c in children if type(c) is Either][0]
children.pop(children.index(either))
for c in either.children:
groups.append([c] + children)
elif Required in types:
required = [c for c in children if type(c) is Required][0]
children.pop(children.index(required))
groups.append(list(required.children) + children)
elif Optional in types:
optional = [c for c in children if type(c) is Optional][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif AnyOptions in types:
optional = [c for c in children if type(c) is AnyOptions][0]
children.pop(children.index(optional))
groups.append(list(optional.children) + children)
elif OneOrMore in types:
oneormore = [c for c in children if type(c) is OneOrMore][0]
children.pop(children.index(oneormore))
groups.append(list(oneormore.children) * 2 + children)
else:
ret.append(children)
return Either(*[Required(*e) for e in ret])
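    # Illustrative transformation (sketch): nesting is flattened into a single
    # top-level Either of Required branches, e.g.
    #   Required(Either(Option('-a'), Option('-b'))).either
    # evaluates to
    #   Either(Required(Option('-a')), Required(Option('-b')))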
class ChildPattern(Pattern):
def __init__(self, name, value=None):
self.name = name
self.value = value
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.value)
def flat(self, *types):
return [self] if not types or type(self) in types else []
def match(self, left, collected=None):
collected = [] if collected is None else collected
pos, match = self.single_match(left)
if match is None:
return False, left, collected
left_ = left[:pos] + left[pos + 1:]
same_name = [a for a in collected if a.name == self.name]
if type(self.value) in (int, list):
if type(self.value) is int:
increment = 1
else:
increment = ([match.value] if type(match.value) is str
else match.value)
if not same_name:
match.value = increment
return True, left_, collected + [match]
same_name[0].value += increment
return True, left_, collected
return True, left_, collected + [match]
class ParentPattern(Pattern):
def __init__(self, *children):
self.children = list(children)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__,
', '.join(repr(a) for a in self.children))
def flat(self, *types):
if type(self) in types:
return [self]
return sum([c.flat(*types) for c in self.children], [])
class Argument(ChildPattern):
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
return n, Argument(self.name, p.value)
return None, None
@classmethod
def parse(cls, source):
name = re.findall('(<\S*?>)', source)[0]
value = re.findall('\[default: (.*)\]', source, flags=re.I)
return cls(name, value[0] if value else None)
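    # e.g. (sketch):
    #   Argument.parse('<file>  [default: out.txt]') -> Argument('<file>', 'out.txt')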
class Command(Argument):
def __init__(self, name, value=False):
self.name = name
self.value = value
def single_match(self, left):
for n, p in enumerate(left):
if type(p) is Argument:
if p.value == self.name:
return n, Command(self.name, True)
else:
break
return None, None
class Option(ChildPattern):
def __init__(self, short=None, long=None, argcount=0, value=False):
assert argcount in (0, 1)
self.short, self.long = short, long
self.argcount, self.value = argcount, value
self.value = None if value is False and argcount else value
@classmethod
def parse(cls, option_description):
short, long, argcount, value = None, None, 0, False
        options, _, description = option_description.strip().partition('  ')
options = options.replace(',', ' ').replace('=', ' ')
for s in options.split():
if s.startswith('--'):
long = s
elif s.startswith('-'):
short = s
else:
argcount = 1
if argcount:
matched = re.findall('\[default: (.*)\]', description, flags=re.I)
value = matched[0] if matched else None
return cls(short, long, argcount, value)
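    # e.g. (sketch):
    #   Option.parse('-o FILE --output=FILE  write output [default: ./out]')
    #   -> Option('-o', '--output', 1, './out')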
def single_match(self, left):
for n, p in enumerate(left):
if self.name == p.name:
return n, p
return None, None
@property
def name(self):
return self.long or self.short
def __repr__(self):
return 'Option(%r, %r, %r, %r)' % (self.short, self.long,
self.argcount, self.value)
class Required(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
l = left
c = collected
for p in self.children:
matched, l, c = p.match(l, c)
if not matched:
return False, left, collected
return True, l, c
class Optional(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
for p in self.children:
m, left, collected = p.match(left, collected)
return True, left, collected
class AnyOptions(Optional):
"""Marker/placeholder for [options] shortcut."""
class OneOrMore(ParentPattern):
def match(self, left, collected=None):
assert len(self.children) == 1
collected = [] if collected is None else collected
l = left
c = collected
l_ = None
matched = True
times = 0
while matched:
# could it be that something didn't match but changed l or c?
matched, l, c = self.children[0].match(l, c)
times += 1 if matched else 0
if l_ == l:
break
l_ = l
if times >= 1:
return True, l, c
return False, left, collected
class Either(ParentPattern):
def match(self, left, collected=None):
collected = [] if collected is None else collected
outcomes = []
for p in self.children:
matched, _, _ = outcome = p.match(left, collected)
if matched:
outcomes.append(outcome)
if outcomes:
return min(outcomes, key=lambda outcome: len(outcome[1]))
return False, left, collected
class TokenStream(list):
def __init__(self, source, error):
self += source.split() if hasattr(source, 'split') else source
self.error = error
def move(self):
return self.pop(0) if len(self) else None
def current(self):
return self[0] if len(self) else None
def parse_long(tokens, options):
"""long ::= '--' chars [ ( ' ' | '=' ) chars ] ;"""
long, eq, value = tokens.move().partition('=')
assert long.startswith('--')
value = None if eq == value == '' else value
similar = [o for o in options if o.long == long]
if tokens.error is DocoptExit and similar == []: # if no exact match
similar = [o for o in options if o.long and o.long.startswith(long)]
if len(similar) > 1: # might be simply specified ambiguously 2+ times?
raise tokens.error('%s is not a unique prefix: %s?' %
(long, ', '.join(o.long for o in similar)))
elif len(similar) < 1:
argcount = 1 if eq == '=' else 0
o = Option(None, long, argcount)
options.append(o)
if tokens.error is DocoptExit:
o = Option(None, long, argcount, value if argcount else True)
else:
o = Option(similar[0].short, similar[0].long,
similar[0].argcount, similar[0].value)
if o.argcount == 0:
if value is not None:
raise tokens.error('%s must not have an argument' % o.long)
else:
if value is None:
if tokens.current() is None:
raise tokens.error('%s requires argument' % o.long)
value = tokens.move()
if tokens.error is DocoptExit:
o.value = value if value is not None else True
return [o]
def parse_shorts(tokens, options):
"""shorts ::= '-' ( chars )* [ [ ' ' ] chars ] ;"""
token = tokens.move()
assert token.startswith('-') and not token.startswith('--')
left = token.lstrip('-')
parsed = []
while left != '':
short, left = '-' + left[0], left[1:]
similar = [o for o in options if o.short == short]
if len(similar) > 1:
raise tokens.error('%s is specified ambiguously %d times' %
(short, len(similar)))
elif len(similar) < 1:
o = Option(short, None, 0)
options.append(o)
if tokens.error is DocoptExit:
o = Option(short, None, 0, True)
else: # why copying is necessary here?
o = Option(short, similar[0].long,
similar[0].argcount, similar[0].value)
value = None
if o.argcount != 0:
if left == '':
if tokens.current() is None:
raise tokens.error('%s requires argument' % short)
value = tokens.move()
else:
value = left
left = ''
if tokens.error is DocoptExit:
o.value = value if value is not None else True
parsed.append(o)
return parsed
def parse_pattern(source, options):
tokens = TokenStream(re.sub(r'([\[\]\(\)\|]|\.\.\.)', r' \1 ', source),
DocoptLanguageError)
result = parse_expr(tokens, options)
if tokens.current() is not None:
raise tokens.error('unexpected ending: %r' % ' '.join(tokens))
return Required(*result)
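# Tokenization sketch: the re.sub above pads brackets, parentheses, pipes and
# '...' with spaces, so '(-h|--help) FILE...' splits into the tokens
#   ['(', '-h', '|', '--help', ')', 'FILE', '...']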
def parse_expr(tokens, options):
"""expr ::= seq ( '|' seq )* ;"""
seq = parse_seq(tokens, options)
if tokens.current() != '|':
return seq
result = [Required(*seq)] if len(seq) > 1 else seq
while tokens.current() == '|':
tokens.move()
seq = parse_seq(tokens, options)
result += [Required(*seq)] if len(seq) > 1 else seq
return [Either(*result)] if len(result) > 1 else result
def parse_seq(tokens, options):
"""seq ::= ( atom [ '...' ] )* ;"""
result = []
while tokens.current() not in [None, ']', ')', '|']:
atom = parse_atom(tokens, options)
if tokens.current() == '...':
atom = [OneOrMore(*atom)]
tokens.move()
result += atom
return result
def parse_atom(tokens, options):
"""atom ::= '(' expr ')' | '[' expr ']' | 'options'
| long | shorts | argument | command ;
"""
token = tokens.current()
result = []
if token in '([':
tokens.move()
matching, pattern = {'(': [')', Required], '[': [']', Optional]}[token]
result = pattern(*parse_expr(tokens, options))
if tokens.move() != matching:
raise tokens.error("unmatched '%s'" % token)
return [result]
elif token == 'options':
tokens.move()
return [AnyOptions()]
elif token.startswith('--') and token != '--':
return parse_long(tokens, options)
elif token.startswith('-') and token not in ('-', '--'):
return parse_shorts(tokens, options)
elif token.startswith('<') and token.endswith('>') or token.isupper():
return [Argument(tokens.move())]
else:
return [Command(tokens.move())]
def parse_argv(tokens, options, options_first=False):
"""Parse command-line argument vector.
If options_first:
argv ::= [ long | shorts ]* [ argument ]* [ '--' [ argument ]* ] ;
else:
argv ::= [ long | shorts | argument ]* [ '--' [ argument ]* ] ;
"""
parsed = []
while tokens.current() is not None:
if tokens.current() == '--':
return parsed + [Argument(None, v) for v in tokens]
elif tokens.current().startswith('--'):
parsed += parse_long(tokens, options)
elif tokens.current().startswith('-') and tokens.current() != '-':
parsed += parse_shorts(tokens, options)
elif options_first:
return parsed + [Argument(None, v) for v in tokens]
else:
parsed.append(Argument(None, tokens.move()))
return parsed
def parse_defaults(doc):
# in python < 2.7 you can't pass flags=re.MULTILINE
split = re.split('\n *(<\S+?>|-\S+?)', doc)[1:]
split = [s1 + s2 for s1, s2 in zip(split[::2], split[1::2])]
options = [Option.parse(s) for s in split if s.startswith('-')]
#arguments = [Argument.parse(s) for s in split if s.startswith('<')]
#return options, arguments
return options
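# e.g. (sketch): for a doc whose options section contains
#   -v  verbose mode
#   -o FILE  output file [default: out.txt]
# parse_defaults returns
#   [Option('-v', None, 0, False), Option('-o', None, 1, 'out.txt')]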
def printable_usage(doc):
# in python < 2.7 you can't pass flags=re.IGNORECASE
usage_split = re.split(r'([Uu][Ss][Aa][Gg][Ee]:)', doc)
if len(usage_split) < 3:
raise DocoptLanguageError('"usage:" (case-insensitive) not found.')
if len(usage_split) > 3:
raise DocoptLanguageError('More than one "usage:" (case-insensitive).')
return re.split(r'\n\s*\n', ''.join(usage_split[1:]))[0].strip()
def formal_usage(printable_usage):
pu = printable_usage.split()[1:] # split and drop "usage:"
return '( ' + ' '.join(') | (' if s == pu[0] else s for s in pu[1:]) + ' )'
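# e.g. (sketch):
#   formal_usage('usage: prog a b\n       prog c') -> '( a b ) | ( c )'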
def extras(help, version, options, doc):
if help and any((o.name in ('-h', '--help')) and o.value for o in options):
print(doc.strip("\n"))
sys.exit()
if version and any(o.name == '--version' and o.value for o in options):
print(version)
sys.exit()
class Dict(dict):
def __repr__(self):
return '{%s}' % ',\n '.join('%r: %r' % i for i in sorted(self.items()))
def docopt(doc, argv=None, help=True, version=None, options_first=False):
"""Parse `argv` based on command-line interface described in `doc`.
`docopt` creates your command-line interface based on its
description that you pass as `doc`. Such description can contain
--options, <positional-argument>, commands, which could be
[optional], (required), (mutually | exclusive) or repeated...
Parameters
----------
doc : str
Description of your command-line interface.
argv : list of str, optional
Argument vector to be parsed. sys.argv[1:] is used if not
provided.
help : bool (default: True)
Set to False to disable automatic help on -h or --help
options.
version : any object
If passed, the object will be printed if --version is in
`argv`.
options_first : bool (default: False)
        Set to True to require options precede positional arguments,
i.e. to forbid options and positional arguments intermix.
Returns
-------
args : dict
A dictionary, where keys are names of command-line elements
such as e.g. "--verbose" and "<path>", and values are the
parsed values of those elements.
Example
-------
>>> from docopt import docopt
>>> doc = '''
Usage:
my_program tcp <host> <port> [--timeout=<seconds>]
my_program serial <port> [--baud=<n>] [--timeout=<seconds>]
my_program (-h | --help | --version)
Options:
-h, --help Show this screen and exit.
--baud=<n> Baudrate [default: 9600]
'''
>>> argv = ['tcp', '127.0.0.1', '80', '--timeout', '30']
>>> docopt(doc, argv)
{'--baud': '9600',
'--help': False,
'--timeout': '30',
'--version': False,
'<host>': '127.0.0.1',
'<port>': '80',
'serial': False,
'tcp': True}
See also
--------
* For video introduction see http://docopt.org
* Full documentation is available in README.rst as well as online
at https://github.com/docopt/docopt#readme
"""
if argv is None:
argv = sys.argv[1:]
DocoptExit.usage = printable_usage(doc)
options = parse_defaults(doc)
pattern = parse_pattern(formal_usage(DocoptExit.usage), options)
# [default] syntax for argument is disabled
#for a in pattern.flat(Argument):
# same_name = [d for d in arguments if d.name == a.name]
# if same_name:
# a.value = same_name[0].value
argv = parse_argv(TokenStream(argv, DocoptExit), list(options),
options_first)
pattern_options = set(pattern.flat(Option))
for ao in pattern.flat(AnyOptions):
doc_options = parse_defaults(doc)
ao.children = list(set(doc_options) - pattern_options)
#if any_options:
# ao.children += [Option(o.short, o.long, o.argcount)
# for o in argv if type(o) is Option]
extras(help, version, argv, doc)
matched, left, collected = pattern.fix().match(argv)
if matched and left == []: # better error message if left?
return Dict((a.name, a.value) for a in (pattern.flat() + collected))
raise DocoptExit()
| |
# -*- coding: utf-8 -*-
# example used: http://pika.readthedocs.io/en/0.10.0/examples/asynchronous_publisher_example.html
import logging
import pika
import json
LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) '
'-35s %(lineno) -5d: %(message)s')
LOGGER = logging.getLogger(__name__)
class AsyncPublisher(object):
"""This is an example publisher that will handle unexpected interactions
with RabbitMQ such as channel and connection closures.
If RabbitMQ closes the connection, it will reopen it. You should
look at the output, as there are limited reasons why the connection may
be closed, which usually are tied to permission related issues or
socket timeouts.
It uses delivery confirmations and illustrates one way to keep track of
    messages that have been sent and whether they've been confirmed by RabbitMQ.
"""
def __init__(self, amqp_url, exchange, exchange_type, queue, input_queue=None, publish_interval=1, reconnect_timeout=5, app_id="app_id.notset"):
"""Setup the example publisher object, passing in the URL we will use
to connect to RabbitMQ.
:param str amqp_url: The URL for connecting to RabbitMQ. This should include the vhost Ex .../infp
:param str exchange: Name of the exchange to connect to
:param str exchange_type: Type of exchange. Ex: 'topic'
:param str queue: Name of queue to bind to
        :param queue input_queue: a multiprocessing.Queue used to read (routing_key, message) tuples that will be flushed to RabbitMQ
        :param int publish_interval: how often to check for and publish received messages
        :param int reconnect_timeout: seconds to wait before retrying a publish after a disconnect
        :param str app_id: application id stamped on outgoing message properties
"""
self._connection = None
self._channel = None
self._deliveries = []
self._acked = 0
self._nacked = 0
self._message_number = 0
self._stopping = False
self._url = amqp_url
self._closing = False
self.exchange = exchange
self.exchange_type = exchange_type
self.queue = queue
self.publish_interval = publish_interval
self.input_queue = input_queue
self.reconnect_timeout = reconnect_timeout
self.app_id = app_id
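    # Illustrative construction (a sketch; the queue wiring shown here is an
    # assumption, not part of this class):
    #   q = multiprocessing.Queue()
    #   pub = AsyncPublisher('amqp://guest:guest@localhost:5672/%2Finfp',
    #                        'my_exchange', 'topic', 'my_queue', input_queue=q)
    #   q.put(('my.routing.key', json.dumps({'hello': 'world'})))
    #   pub.run()   # blocks: runs the pika SelectConnection ioloop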
def connect(self):
"""This method connects to RabbitMQ, returning the connection handle.
When the connection is established, the on_connection_open method
will be invoked by pika. If you want the reconnection to work, make
sure you set stop_ioloop_on_close to False, which is not the default
behavior of this adapter.
:rtype: pika.SelectConnection
"""
LOGGER.info('Connecting to %s', self._url)
return pika.SelectConnection(pika.URLParameters(self._url),
self.on_connection_open,
stop_ioloop_on_close=False)
def on_connection_open(self, unused_connection):
"""This method is called by pika once the connection to RabbitMQ has
been established. It passes the handle to the connection object in
case we need it, but in this case, we'll just mark it unused.
:type unused_connection: pika.SelectConnection
"""
LOGGER.info('Connection opened')
self.add_on_connection_close_callback()
self.open_channel()
def add_on_connection_close_callback(self):
"""This method adds an on close callback that will be invoked by pika
when RabbitMQ closes the connection to the publisher unexpectedly.
"""
LOGGER.info('Adding connection close callback')
self._connection.add_on_close_callback(self.on_connection_closed)
def on_connection_closed(self, connection, reply_code, reply_text):
"""This method is invoked by pika when the connection to RabbitMQ is
closed unexpectedly. Since it is unexpected, we will reconnect to
RabbitMQ if it disconnects.
:param pika.connection.Connection connection: The closed connection obj
:param int reply_code: The server provided reply_code if given
:param str reply_text: The server provided reply_text if given
"""
self._channel = None
if self._closing:
self._connection.ioloop.stop()
else:
            LOGGER.warning('Connection closed, reopening in %s seconds: (%s) %s',
                           self.reconnect_timeout, reply_code, reply_text)
            self._connection.add_timeout(self.reconnect_timeout, self.reconnect)
def reconnect(self):
"""Will be invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
self._deliveries = []
self._acked = 0
self._nacked = 0
self._message_number = 0
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start()
def open_channel(self):
"""This method will open a new channel with RabbitMQ by issuing the
Channel.Open RPC command. When RabbitMQ confirms the channel is open
by sending the Channel.OpenOK RPC reply, the on_channel_open method
will be invoked.
"""
LOGGER.info('Creating a new channel')
self._connection.channel(on_open_callback=self.on_channel_open)
def on_channel_open(self, channel):
"""This method is invoked by pika when the channel has been opened.
The channel object is passed in so we can make use of it.
Since the channel is now open, we'll declare the exchange to use.
:param pika.channel.Channel channel: The channel object
"""
LOGGER.info('Channel opened')
self._channel = channel
self.add_on_channel_close_callback()
self.setup_exchange(self.exchange)
def add_on_channel_close_callback(self):
"""This method tells pika to call the on_channel_closed method if
RabbitMQ unexpectedly closes the channel.
"""
LOGGER.info('Adding channel close callback')
self._channel.add_on_close_callback(self.on_channel_closed)
def on_channel_closed(self, channel, reply_code, reply_text):
"""Invoked by pika when RabbitMQ unexpectedly closes the channel.
Channels are usually closed if you attempt to do something that
violates the protocol, such as re-declare an exchange or queue with
different parameters. In this case, we'll close the connection
to shutdown the object.
:param pika.channel.Channel: The closed channel
:param int reply_code: The numeric reason the channel was closed
:param str reply_text: The text reason the channel was closed
"""
LOGGER.warning('Channel was closed: (%s) %s', reply_code, reply_text)
if not self._closing:
self._connection.close()
def setup_exchange(self, exchange_name):
"""Setup the exchange on RabbitMQ by invoking the Exchange.Declare RPC
command. When it is complete, the on_exchange_declareok method will
be invoked by pika.
:param str|unicode exchange_name: The name of the exchange to declare
"""
LOGGER.info('Declaring exchange %s', exchange_name)
self._channel.exchange_declare(self.on_exchange_declareok,
exchange_name,
self.exchange_type)
def on_exchange_declareok(self, unused_frame):
"""Invoked by pika when RabbitMQ has finished the Exchange.Declare RPC
command.
:param pika.Frame.Method unused_frame: Exchange.DeclareOk response frame
"""
LOGGER.info('Exchange declared')
self.setup_queue(self.queue)
def setup_queue(self, queue_name):
"""Setup the queue on RabbitMQ by invoking the Queue.Declare RPC
command. When it is complete, the on_queue_declareok method will
be invoked by pika.
:param str|unicode queue_name: The name of the queue to declare.
"""
LOGGER.info('Declaring queue %s', queue_name)
self._channel.queue_declare(self.on_queue_declareok, queue_name)
def on_queue_declareok(self, method_frame):
"""Method invoked by pika when the Queue.Declare RPC call made in
setup_queue has completed. In this method we will bind the queue
and exchange together with the routing key by issuing the Queue.Bind
RPC command. When this command is complete, the on_bindok method will
be invoked by pika.
:param pika.frame.Method method_frame: The Queue.DeclareOk frame
"""
LOGGER.info('Binding %s to %s',
self.exchange, self.queue)
self._channel.queue_bind(self.on_bindok, self.queue,
self.exchange)
def on_bindok(self, unused_frame):
"""This method is invoked by pika when it receives the Queue.BindOk
response from RabbitMQ. Since we know we're now setup and bound, it's
time to start publishing."""
LOGGER.info('Queue bound')
self.start_publishing()
def start_publishing(self):
"""This method will enable delivery confirmations and schedule the
first message to be sent to RabbitMQ
"""
LOGGER.info('Issuing consumer related RPC commands')
self.enable_delivery_confirmations()
self.schedule_next_message()
def enable_delivery_confirmations(self):
"""Send the Confirm.Select RPC method to RabbitMQ to enable delivery
confirmations on the channel. The only way to turn this off is to close
the channel and create a new one.
When the message is confirmed from RabbitMQ, the
on_delivery_confirmation method will be invoked passing in a Basic.Ack
or Basic.Nack method from RabbitMQ that will indicate which messages it
is confirming or rejecting.
"""
LOGGER.info('Issuing Confirm.Select RPC command')
self._channel.confirm_delivery(self.on_delivery_confirmation)
def on_delivery_confirmation(self, method_frame):
"""Invoked by pika when RabbitMQ responds to a Basic.Publish RPC
command, passing in either a Basic.Ack or Basic.Nack frame with
the delivery tag of the message that was published. The delivery tag
is an integer counter indicating the message number that was sent
        on the channel via Basic.Publish. Here we're just doing housekeeping:
        keeping track of stats and removing confirmed message numbers from
        the list of messages that are pending confirmation.
:param pika.frame.Method method_frame: Basic.Ack or Basic.Nack frame
"""
confirmation_type = method_frame.method.NAME.split('.')[1].lower()
LOGGER.info('Received %s for delivery tag: %i',
confirmation_type,
method_frame.method.delivery_tag)
if confirmation_type == 'ack':
self._acked += 1
elif confirmation_type == 'nack':
self._nacked += 1
self._deliveries.remove(method_frame.method.delivery_tag)
LOGGER.info('Published %i messages, %i have yet to be confirmed, '
'%i were acked and %i were nacked',
self._message_number, len(self._deliveries),
self._acked, self._nacked)
def schedule_next_message(self):
"""If we are not closing our connection to RabbitMQ, schedule another
message to be delivered in PUBLISH_INTERVAL seconds.
"""
if self._stopping:
return
if self.input_queue and (not self.input_queue.empty()):
LOGGER.info('Scheduling publish to rabbitmq')
self._connection.add_timeout(self.publish_interval,
self.publish_message)
else:
self._connection.add_timeout(self.publish_interval,
self.schedule_next_message)
def publish_message(self):
"""If the class is not stopping, publish a message to RabbitMQ,
appending a list of deliveries with the message number that was sent.
This list will be used to check for delivery confirmations in the
on_delivery_confirmations method.
Once the message has been sent, schedule another message to be sent.
The main reason I put scheduling in was just so you can get a good idea
of how the process is flowing by slowing down and speeding up the
delivery intervals by changing the PUBLISH_INTERVAL constant in the
class.
"""
if self._stopping:
return
        # If we got disconnected in the meantime, reschedule publishing
        if self._channel is None:
            LOGGER.info('DISCONNECTED. Rescheduling publish in %fs',
                        self.reconnect_timeout)
            self._connection.add_timeout(self.reconnect_timeout,
                                         self.publish_message)
return
routing_key, message = self.input_queue.get()
        LOGGER.info('routing key: %s | publishing message: %s',
                    routing_key, message)
properties = pika.BasicProperties(app_id=self.app_id,
content_type='application/json')
self._channel.basic_publish(self.exchange, routing_key,
message,
properties)
self._message_number += 1
self._deliveries.append(self._message_number)
LOGGER.info('Published message # %i', self._message_number)
self.schedule_next_message()
def close_channel(self):
"""Invoke this command to close the channel with RabbitMQ by sending
the Channel.Close RPC command.
"""
LOGGER.info('Closing the channel')
if self._channel:
self._channel.close()
def run(self):
"""Run the example code by connecting and then starting the IOLoop.
"""
self._connection = self.connect()
try:
self._connection.ioloop.start()
except KeyboardInterrupt:
self.stop()
def stop(self):
"""Stop the example by closing the channel and connection. We
set a flag here so that we stop scheduling new messages to be
        published. The IOLoop is started here because this method is
        invoked by the try/except in run() when KeyboardInterrupt is caught.
Starting the IOLoop again will allow the publisher to cleanly
disconnect from RabbitMQ.
"""
LOGGER.info('Stopping')
self._stopping = True
self.close_channel()
self.close_connection()
self._connection.ioloop.start()
LOGGER.info('Stopped')
def close_connection(self):
"""This method closes the connection to RabbitMQ."""
LOGGER.info('Closing connection')
self._closing = True
self._connection.close()
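# Usage sketch (hedged): the enclosing publisher class's name and constructor
# arguments are defined earlier in this file and are not shown here, so
# `ExamplePublisher(...)` below is a hypothetical stand-in. The intended
# pattern is to feed (routing_key, message) tuples into `input_queue` and
# then call run() to enter pika's IOLoop:
#
#     import Queue
#     q = Queue.Queue()
#     q.put(('sensors.gps', '{"lat": 51.5, "lon": -0.1}'))
#     publisher = ExamplePublisher(...)  # hypothetical name and args
#     publisher.input_queue = q
#     publisher.run()  # blocks; Ctrl-C triggers stop() via run()'s except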
#!/usr/bin/python
import sys, getopt
#----------------FlagCreator----------------------
# FlagCreator by Matthew Grant
# This program helps a developer in Python with
# setting up flags/switches for their program
# Just answer a couple of questions and this
# program will write the code for the flags/switches
# that you specified
# -----------------Arguments----------------------
# -h (help)
# -o output file created by flagcreator
#------------------Example ------------------------
# python flagcreator.py -o /root/home/temp/program1.py
#-----------------variables------------------------
outfile=""
flaglist=[]
longnamelist=[]
variablelist=[]
q=0
#-----------------functions-----------------------
def main(argv):
    # This function determines which flags were set
try:
opts, args = getopt.getopt(argv, "ho:", ["help","output="])
except getopt.GetoptError:
error1()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-o", "--output="):
global outfile
outfile= arg
print "file = "+outfile
if (outfile==""):
print "[Error] There is an error in the format of filecreator."
print "[Info] Enter in 'filecreator -h' to get help with the format"
sys.exit()
def error1():
print "There was an error in the format of the filecreator option"
print "Enter 'filecreator.py -h for help"
return
def usage():
print "---------------FileCreator-------------------"
print "File Creator 1.0.1 by Matthew Grant"
print "This program helps a developer by automatically writing"
print "the code for flags/switches by just answering a couple"
print "of questions"
print "--------------Options-----------------------"
print "-h, --help display help for using filecreator"
print "-o, --output tells the program where to save the file"
print "note: that if you use '-o display' instead of a filename"
print "the output will go to the screen instead of being save to a file"
print "--------------Example-----------------------"
print "python filecreator.py -h (Displays the help file)"
print "python filecreator.py -o /root/test.py (saves the output to a file name test.py"
print "python filecreator.py -o display (will show the output on the screen and not to a file"
print "---------------------------------------------"
print
return
def getflag():
    # This function gets the short-name for the flag and stores it in flaglist[]
flag1=0
global flaglist
while (flag1==0):
flag = raw_input("What is the letter for the flag? ")
l=len(flag)
        if (l != 1):
print "[Error] The letter for the flag has to be one character long"
else:
print "The flag is -"+flag
flaglist.append(flag)
flag1=1
return flag
def getlongname(flag):
    # This function gets the long-name for the flag and stores it in longnamelist[]
flag1=0
global longnamelist
while (flag1==0):
longname=raw_input("What is the long name for the flag -"+flag+" Just leave blank if you do not want a longname ")
l=len(longname)
if (l<2 and longname!=""):
print "[Error] The longname of a flag has to be more than one character"
else:
print "The long name for flag -"+flag+" is "+longname
longnamelist.append(longname)
flag1=1
return longname
def getvariable(flag,longname):
# This function gets the variable name that will save the data from the flags
flag1=0
global variablelist
while (flag1==0):
variable=raw_input("What is the variable name for the flag -"+flag+"? ")
l=len(variable)
if (l<2):
print "[Error] The variable of a flag has to be more than one character"
else:
print "The variable for flag -"+flag+" is "+variable
variablelist.append(variable)
flag1=1
return
def getrequired(flag, longname):
    # This function sets whether the flag requires a data parameter
flag1=0
global variablelist
while (flag1==0):
required=raw_input("Does this flag has parameter data: (y)es or(n)o ")
if (required=="yes" or required=="y" or required=="YES" or required=="Yes"):
getvariable(flag,longname)
flag1=1
elif (required=="no" or required=="n" or required=="NO" or required=="No"):
print "The flag does not have any data to go with it"
variablelist.append("")
flag1=1
else:
print "Did not recognized the anwser to does this flag has parameter data: (y)es or (n)o"
return
def anotherflag():
# This function determines whether you want to add another flag
flag1=0
global variablelist
while (flag1==0):
required=raw_input("Do you want to add another flag: (y)es or(n)o ")
if (required=="yes" or required=="y" or required=="YES" or required=="Yes"):
againflag=0
flag1=1
elif (required=="no" or required=="n" or required=="NO" or required=="No"):
print "Done adding flags...."
variablelist.append("")
againflag=1
flag1=1
else:
print "Did not recognized the input you entered: (y)es or (n)o"
return againflag
def printoutput():
# This function prints the output file
global flaglist
str1=""
str1+= "#-------------------------------\n"
str1+= "#!/usr/bin/python\n"
str1+= "#-----------------------FlagCreator-------------------------------\n"
str1+= "#Flagcreator by Matthew Grant\n"
str1+= "#flagcreator was used to create the flags/switches in this program\n"
str1+= "#Free to use as you wish\n"
str1+= "#Proud member of the Zeigst-Movement\n"
str1+= "#-----------------------------------------------------------------\n"
str1+= "import sys, getopt\n"
str1+= "#----------Global Variable List---------------\n"
for var1 in variablelist:
if (var1!=""):
str1+= var1+"=''\n"
str1+= "#----------Functions---------------------------\n"
str1+= "def usage():\n"
str1+= "# This is the function that handles the help flag\n"
str1+= " print 'FlagCreator Help'\n"
str1+= " return\n"
str1+= "\n"
str1+= "def main(argv):\n"
str1+= " try:\n"
flagstr=getflagstr()
str1+= " "+flagstr+"\n"
str1+= " except getopt.GetoptError:\n"
str1+= " print 'There was an error in the format of FileCreator option'\n"
str1+= " print 'Enter filecreator.py -h for help'\n"
str1+= " sys.exit(2)\n"
str1+= " for opt, arg in opts:\n"
str1+= " if opt in ('-h', '--help'):\n"
str1+= " usage()\n"
str1+= " sys.exit()\n"
    bodystr=getbodystr()
    str1+= bodystr
str1+= "#-------------Main Body of the Program--------\n"
str1+= "if __name__ == \"__main__\":\n"
str1+= " main(sys.argv[1:])\n"
str1+= "#---------------------------------------------\n"
global outfile
if (outfile=="display" or outfile=="Display" or outfile=="DISPLAY"):
print str1
else:
outtofile(outfile,str1)
return str1
def getbodystr():
    # This is a helper function for the printoutput function; it writes the
    # elif branch that handles each flag in the generated program
    global flaglist
    global variablelist
    global longnamelist
    c1=0
    str1=""
    for flag1 in flaglist:
        if (longnamelist[c1]!=""):
            str1+="        elif opt in ('-"+flag1+"','--"+longnamelist[c1]+"'):\n"
        else:
            str1+="        elif opt in ('-"+flag1+"',):\n"
        #print "v="+variablelist[c1]
        if (variablelist[c1]!=""):
            str1+="            global "+variablelist[c1]+"\n"
            str1+="            "+variablelist[c1]+"=arg\n"
        else:
            str1+="            pass\n"
        str1+="            # replace with a call to a function to handle this flag\n"
        c1+=1
    return str1
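# Example (sketch): for flag 'o' with long name 'output' and variable
# 'outfile', getbodystr() emits (indented to sit inside the generated main()):
#
#         elif opt in ('-o','--output'):
#             global outfile
#             outfile=arg
#             # replace with a call to a function to handle this flag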
def getflagstr():
    # This is a helper function for the printoutput function; it builds the
    # getopt format strings for the flags in the generated program.
    global flaglist
    global variablelist
    global longnamelist
    c1=0
    str1="h"
    str2="'help',"
    for flag1 in flaglist:
        str1+=flag1
        if variablelist[c1]!="":
            str1+=":"
        c1+=1
    #print "str="+str1
    c1=0
    for longname1 in longnamelist:
        if (longname1!=""):
            str2+="'"+longname1
            if variablelist[c1]!="":
                str2+="="
            str2+="',"
        c1+=1
    str2=str2[:-1]
strtotal="opts, args = getopt.getopt(argv, '"+str1+"',["+str2+"])"
return strtotal
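# Example (sketch): for flag 'o' (with parameter data) and long name
# 'output', getflagstr() returns:
#
#     opts, args = getopt.getopt(argv, 'ho:',['help','output='])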
def outtofile(file1,str1):
# Open a file
try:
fo = open(file1, "w")
fo.write( str1+"\n");
except IOError:
print "I/O Error: could not create the file. Check your path"
else:
        # Close opened file
fo.close()
return
#------------------Main Body---------------------------------
if __name__ == "__main__":
main(sys.argv[1:])
print "--------------flag creator -----------------"
print "Just answer the questions and the flags will be set up for you"
print "--------------------------------------------"
while (q==0):
flag=getflag()
longname=getlongname(flag)
getrequired(flag,longname)
again=anotherflag()
if (again==1):
q=1
print "-------------------------------------"
printoutput()
print "Finished!!!"
# Licensed under the MIT license
# http://opensource.org/licenses/mit-license.php
# Copyright 2006-2009 Frank Scholz <coherence@beebits.net>
# Modified by Colin Laplace, added is_watched() function
# Copyright 2008 Adroll.com and Valentino Volonghi <dialtone@adroll.com>
# Modified by Valentino Volonghi.
# * Increased isWatched efficiency, now it's O(1)
# * Code reorganization, added docstrings, Twisted coding style
# * Introduced a hack to partially solve a race condition in auto_add.
# * Removed code that didn't use libc 2.4 but magic hacks
# -> reverted, as it might still be needed somewhere (fs)
# * Used fdesc.readFromFD to read during doRead
import os
import struct
try:
import ctypes
import ctypes.util
except ImportError:
raise SystemError("ctypes not detected on this system, can't use INotify")
from twisted.internet import reactor
from twisted.internet.abstract import FileDescriptor
from twisted.internet import fdesc
from twisted.python.filepath import FilePath
# from /usr/src/linux/include/linux/inotify.h
IN_ACCESS = 0x00000001L # File was accessed
IN_MODIFY = 0x00000002L # File was modified
IN_ATTRIB = 0x00000004L # Metadata changed
IN_CLOSE_WRITE = 0x00000008L # Writable file was closed
IN_CLOSE_NOWRITE = 0x00000010L # Unwritable file closed
IN_OPEN = 0x00000020L # File was opened
IN_MOVED_FROM = 0x00000040L # File was moved from X
IN_MOVED_TO = 0x00000080L # File was moved to Y
IN_CREATE = 0x00000100L # Subfile was created
IN_DELETE = 0x00000200L # Subfile was deleted
IN_DELETE_SELF = 0x00000400L # Self was deleted
IN_MOVE_SELF = 0x00000800L # Self was moved
IN_UNMOUNT = 0x00002000L # Backing fs was unmounted
IN_Q_OVERFLOW = 0x00004000L # Event queued overflowed
IN_IGNORED = 0x00008000L # File was ignored
IN_ONLYDIR = 0x01000000 # only watch the path if it is a directory
IN_DONT_FOLLOW = 0x02000000 # don't follow a sym link
IN_MASK_ADD = 0x20000000 # add to the mask of an already existing watch
IN_ISDIR = 0x40000000 # event occurred against dir
IN_ONESHOT = 0x80000000 # only send event once
IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE # closes
IN_MOVED = IN_MOVED_FROM | IN_MOVED_TO # moves
IN_CHANGED = IN_MODIFY | IN_ATTRIB # changes
IN_WATCH_MASK = IN_MODIFY | IN_ATTRIB | \
IN_CREATE | IN_DELETE | \
IN_DELETE_SELF | IN_MOVE_SELF | \
IN_UNMOUNT | IN_MOVED_FROM | IN_MOVED_TO
_FLAG_TO_HUMAN = {
IN_ACCESS: 'access',
IN_MODIFY: 'modify',
IN_ATTRIB: 'attrib',
IN_CLOSE_WRITE: 'close_write',
IN_CLOSE_NOWRITE: 'close_nowrite',
IN_OPEN: 'open',
IN_MOVED_FROM: 'moved_from',
IN_MOVED_TO: 'moved_to',
IN_CREATE: 'create',
IN_DELETE: 'delete',
IN_DELETE_SELF: 'delete_self',
IN_MOVE_SELF: 'move_self',
IN_UNMOUNT: 'unmount',
IN_Q_OVERFLOW: 'queue_overflow',
IN_IGNORED: 'ignored',
IN_ONLYDIR: 'only_dir',
IN_DONT_FOLLOW: 'dont_follow',
IN_MASK_ADD: 'mask_add',
IN_ISDIR: 'is_dir',
IN_ONESHOT: 'one_shot'
}
# system call numbers are architecture-specific
# see /usr/include/linux/asm/unistd.h and look for inotify
_inotify_syscalls = { 'i386': (291,292,293), # FIXME, there has to be a better way for this
'i486': (291,292,293),
'i586': (291,292,293),
'i686': (291,292,293),
'x86_64': (253,254,255), # gotten from FC-6 and F-7
'armv6l':(316,317,318), # Nokia N800
'armv5tej1':(316,317,318), # Nokia N770
'ppc': (275,276,277), # PPC, like PS3
}
def flag_to_human(mask):
"""
    Auxiliary function that converts a hexadecimal mask into a list
    of human-readable flags.
"""
s = []
for (k, v) in _FLAG_TO_HUMAN.iteritems():
if k & mask:
s.append(v)
return s
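# Example (sketch): flag_to_human(IN_CREATE | IN_ISDIR) returns
# ['create', 'is_dir'] (in arbitrary dict-iteration order).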
class Watch(object):
"""
Watch object that represents a Watch point in the filesystem.
@ivar path: The path over which this watch point is monitoring
@ivar mask: The events monitored by this watchpoint
@ivar auto_add: Flag that determines whether this watch point
should automatically add created subdirectories
@ivar callbacks: C{list} of C{tuples} of callbacks that should be
called synchronously on the events monitored.
"""
    def __init__(self, path, mask=IN_WATCH_MASK, auto_add=False, callbacks=None):
        self.path = path
        self.mask = mask
        self.auto_add = auto_add
        # Avoid a shared mutable default; normalise a single callback to a list
        if callbacks is None:
            callbacks = []
        elif not isinstance(callbacks, list):
            callbacks = [callbacks]
        self.callbacks = callbacks
def addCallback(self, callback, args=None):
"""
Add a new callback to the list with the given auxiliary
optional argument.
"""
self.callbacks.append((callback, args))
def notify(self, filename, events):
"""
Callback function used by L{INotify} to dispatch an event.
"""
for callback in self.callbacks:
if callback is not None:
#wrap that so our loop isn't aborted by a faulty callback
try:
callback[0](self, filename, events, callback[1])
except:
import traceback
traceback.print_exc()
class INotify(FileDescriptor, object):
"""
The INotify file descriptor, it basically does everything related
to INotify, from reading to notifying watch points.
"""
_instance_ = None # Singleton
def __new__(cls, *args, **kwargs):
obj = getattr(cls, '_instance_', None)
if obj is not None:
return obj
else:
obj = super(INotify, cls).__new__(cls, *args, **kwargs)
# Check inotify support by checking for the required functions
obj.libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
if len([function for function in "inotify_add_watch inotify_init inotify_rm_watch".split() if hasattr(obj.libc, function)]) == 3:
obj.inotify_init = obj.libc.inotify_init
obj.inotify_add_watch = obj.libc_inotify_add_watch
obj.inotify_rm_watch = obj.libc_inotify_rm_watch
else:
print("inotify.py - can't use libc6, 2.4 or higher needed")
import platform
if platform.system() != 'Linux':
raise SystemError("unknown system '%r', INotify support disabled" % platform.uname())
machine = platform.machine()
try:
obj._init_syscall_id = _inotify_syscalls[machine][0]
obj._add_watch_syscall_id = _inotify_syscalls[machine][1]
obj._rm_watch_syscall_id = _inotify_syscalls[machine][2]
obj.inotify_init = obj._inotify_init
obj.inotify_add_watch = obj._inotify_add_watch
obj.inotify_rm_watch = obj._inotify_rm_watch
            except KeyError:
raise SystemError("unknown system '%s', INotify support disabled" % machine)
FileDescriptor.__init__(obj)
obj._fd = obj.inotify_init()
if obj._fd < 0:
raise SystemError("INotify initialization error.")
fdesc.setNonBlocking(obj._fd)
reactor.addReader(obj)
obj._buffer = ''
# Mapping from wds to Watch objects
obj._watchpoints = {}
# Mapping from paths to wds
obj._watchpaths = {}
cls._instance_ = obj
return obj
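    # Note: INotify is a singleton -- a second INotify() call returns the
    # same instance, as demonstrated in the __main__ block at the end of
    # this file.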
def _addWatch(self, path, mask, auto_add, callbacks):
"""
        Private helper that abstracts the use of ctypes and helps
        manage state related to these calls.
"""
wd = self.inotify_add_watch(
os.path.normpath(path),
mask
)
if wd < 0:
raise IOError("Failed to add watch on '%r' - (%r)" % (path, wd))
iwp = Watch(path, mask, auto_add, callbacks)
self._watchpoints[wd] = iwp
self._watchpaths[path] = wd
return wd
def _rmWatch(self, wd):
"""
        Private helper that abstracts the use of ctypes and helps
        manage state related to these calls.
"""
self.inotify_rm_watch(wd)
iwp = self._watchpoints.pop(wd)
self._watchpaths.pop(iwp.path)
del iwp
def _inotify_init(self):
return self.libc.syscall(self._init_syscall_id)
def _inotify_add_watch(self, path, mask):
if type(path) is unicode:
path = path.encode('utf-8')
self.libc.syscall.argtypes = [ctypes.c_int, ctypes.c_int, ctypes.c_char_p, ctypes.c_int]
else:
self.libc.syscall.argtypes = None
return self.libc.syscall(self._add_watch_syscall_id, self._fd, path, mask)
def _inotify_rm_watch(self, wd):
return self.libc.syscall(self._rm_watch_syscall_id, self._fd, wd)
def libc_inotify_add_watch(self, path, mask):
if type(path) is unicode:
path = path.encode('utf-8')
self.libc.inotify_add_watch.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int]
else:
self.libc.inotify_add_watch.argtypes = None
return self.libc.inotify_add_watch(self._fd, path, mask)
def libc_inotify_rm_watch(self, wd):
return self.libc.inotify_rm_watch(self._fd, wd)
def release(self):
"""
Release the inotify file descriptor and do the necessary cleanup
"""
reactor.removeReader(self)
if hasattr(self, '_fd') and self._fd >= 0:
try:
os.close(self._fd)
except OSError:
pass
if hasattr(INotify, '_instance_'):
del INotify._instance_
# I'd rather not have this...
__del__ = release
def fileno(self):
"""
Get the underlying file descriptor from this inotify observer.
"""
return self._fd
def notify(self, iwp, filename, mask, *args):
"""
A simple callback that you can use for tests
"""
print "event %s on %s %s" % (
', '.join(flag_to_human(mask)), iwp.path, filename)
def doRead(self):
"""
Read some data from the observed file descriptors
"""
fdesc.readFromFD(self._fd, self._doRead)
def _doRead(self, in_):
"""
Work on the data just read from the file descriptor.
"""
self._buffer += in_
while True:
if len(self._buffer) < 16:
break
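            # Wire format per inotify(7): each event is a struct inotify_event
            # (int wd; uint32 mask; uint32 cookie; uint32 len) followed by
            # `len` bytes of NUL-padded name, unpacked below.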
wd, mask, cookie, size = struct.unpack("=LLLL", self._buffer[0:16])
if size:
name = self._buffer[16:16+size].rstrip('\0')
else:
name = None
self._buffer = self._buffer[16+size:]
try:
iwp = self._watchpoints[wd]
            except KeyError:
continue # can this happen?
path = iwp.path
if name:
path = os.path.join(path, name)
iwp.notify(name, mask)
else:
iwp.notify(path, mask)
if (iwp.auto_add and mask & IN_ISDIR and mask & IN_CREATE):
                # Note that this is a fricking hack... it's because we
                # cannot be fast enough in adding a watch to a directory,
                # so we end up getting here too late if operations have
                # already happened in the subdir; we basically need to
                # catch up.
                # This eventually means that we may generate duplicate
                # events; your app must be resilient to them.
def _addChildren(iwp):
try:
listdir = os.listdir(iwp.path)
except OSError:
# Somebody or something (like a test)
# removed this directory while we were in the
# callLater(0...) waiting. It doesn't make
# sense to process it anymore
return
# note that it's true that listdir will only see
# the subdirs inside path at the moment of the call
# but path is monitored already so if something is
# created we will receive an event.
for f in listdir:
inner = os.path.join(iwp.path, f)
# It's a directory, watch it and then add its
# children
if os.path.isdir(inner):
wd = self.watch(
inner, mask=iwp.mask, auto_add=True,
callbacks=iwp.callbacks
)
iwp.notify(f, IN_ISDIR|IN_CREATE)
# now inner is watched, we can add its children
# the callLater is to avoid recursion
reactor.callLater(0,
_addChildren, self._watchpoints[wd])
# It's a file and we notify it.
if os.path.isfile(inner):
iwp.notify(f, IN_CREATE|IN_CLOSE_WRITE)
if os.path.isdir(path):
new_wd = self.watch(
path, mask=iwp.mask, auto_add=True,
callbacks=iwp.callbacks
)
# This is very very very hacky and I'd rather
# not do this but we have no other alternative
# that is less hacky other than surrender
# We use callLater because we don't want to have
# too many events waiting while we process these
# subdirs, we must always answer events as fast
# as possible or the overflow might come.
reactor.callLater(0,
_addChildren, self._watchpoints[new_wd])
if mask & IN_DELETE_SELF:
self._rmWatch(wd)
def watch(self, path, mask=IN_WATCH_MASK, auto_add=None, callbacks=[], recursive=False):
"""
Watch the 'mask' events in given path.
@param path: The path needing monitoring
@type path: L{FilePath} or C{str} or C{unicode}
@param mask: The events that should be watched
        @type mask: C{int} (a bitmask of IN_* constants)
@param auto_add: if True automatically add newly created
subdirectories
@type auto_add: C{boolean}
@param callbacks: A list of callbacks that should be called
when an event happens in the given path.
@type callbacks: C{list} of C{tuples}
@param recursive: Also add all the subdirectories in this path
@type recursive: C{boolean}
"""
if isinstance(path, FilePath):
path = path.path
if type(path) is unicode:
path = path.encode('utf-8')
path = os.path.realpath(path)
if recursive:
for root, dirs, files in os.walk(path):
self.watch(root, mask, auto_add, callbacks, False)
else:
wd = self.isWatched(path)
if wd:
return wd
mask = mask | IN_DELETE_SELF
return self._addWatch(path, mask, auto_add, callbacks)
def ignore(self, path):
"""
Remove the watch point monitoring the given path
@param path: The path that should be ignored
@type path: L{FilePath} or C{unicode} or C{str}
"""
if isinstance(path, FilePath):
path = path.path
if type(path) is unicode:
path = path.encode('utf-8')
path = os.path.realpath(path)
wd = self.isWatched(path)
if wd:
self._rmWatch(wd)
def isWatched(self, path):
"""
Helper function that checks if the path is already monitored
        and returns its watch descriptor if so.
@param path: The path that should be checked
@type path: L{FilePath} or C{unicode} or C{str}
"""
if isinstance(path, FilePath):
path = path.path
if type(path) is unicode:
path = path.encode('utf-8')
return self._watchpaths.get(path, False)
def flag_to_human(self,mask):
return flag_to_human(mask)
if __name__ == '__main__':
i = INotify()
print i
i.watch(unicode('/tmp'), auto_add=True, callbacks=(i.notify,None), recursive=True)
i2 = INotify()
print i2
i2.watch('/', auto_add=True, callbacks=(i2.notify,None), recursive=False)
reactor.run()
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from testtools.matchers import HasLength
from ironic.api.controllers.v1 import driver
from ironic.common import exception
from ironic.conductor import rpcapi
from ironic.tests.api import base
class TestListDrivers(base.FunctionalTest):
d1 = 'fake-driver1'
d2 = 'fake-driver2'
h1 = 'fake-host1'
h2 = 'fake-host2'
def register_fake_conductors(self):
self.dbapi.register_conductor({
'hostname': self.h1,
'drivers': [self.d1, self.d2],
})
self.dbapi.register_conductor({
'hostname': self.h2,
'drivers': [self.d2],
})
def test_drivers(self):
self.register_fake_conductors()
expected = sorted([
{'name': self.d1, 'hosts': [self.h1]},
{'name': self.d2, 'hosts': [self.h1, self.h2]},
])
data = self.get_json('/drivers')
self.assertThat(data['drivers'], HasLength(2))
drivers = sorted(data['drivers'])
for i in range(len(expected)):
d = drivers[i]
self.assertEqual(expected[i]['name'], d['name'])
self.assertEqual(sorted(expected[i]['hosts']), sorted(d['hosts']))
self.validate_link(d['links'][0]['href'])
self.validate_link(d['links'][1]['href'])
def test_drivers_no_active_conductor(self):
data = self.get_json('/drivers')
self.assertThat(data['drivers'], HasLength(0))
self.assertEqual([], data['drivers'])
def test_drivers_get_one_ok(self):
self.register_fake_conductors()
data = self.get_json('/drivers/%s' % self.d1)
self.assertEqual(self.d1, data['name'])
self.assertEqual([self.h1], data['hosts'])
self.validate_link(data['links'][0]['href'])
self.validate_link(data['links'][1]['href'])
def test_drivers_get_one_not_found(self):
response = self.get_json('/drivers/%s' % self.d1, expect_errors=True)
self.assertEqual(404, response.status_int)
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_sync(self, mocked_driver_vendor_passthru):
self.register_fake_conductors()
mocked_driver_vendor_passthru.return_value = ({
'return_key': 'return_value',
}, False)
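        # The mocked RPC returns a (response_body, is_async) tuple; False here
        # yields the 200 response asserted below, while the async variant
        # returns True and expects 202 (convention inferred from these tests).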
response = self.post_json(
'/drivers/%s/vendor_passthru/do_test' % self.d1,
{'test_key': 'test_value'})
self.assertEqual(200, response.status_int)
self.assertEqual(mocked_driver_vendor_passthru.return_value[0],
response.json)
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_async(self, mocked_driver_vendor_passthru):
self.register_fake_conductors()
mocked_driver_vendor_passthru.return_value = (None, True)
response = self.post_json(
'/drivers/%s/vendor_passthru/do_test' % self.d1,
{'test_key': 'test_value'})
self.assertEqual(202, response.status_int)
self.assertIsNone(mocked_driver_vendor_passthru.return_value[0])
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_put(self, mocked_driver_vendor_passthru):
self.register_fake_conductors()
return_value = (None, 'async')
mocked_driver_vendor_passthru.return_value = return_value
response = self.put_json(
'/drivers/%s/vendor_passthru/do_test' % self.d1,
{'test_key': 'test_value'})
self.assertEqual(202, response.status_int)
self.assertEqual(return_value[0], response.json)
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_get(self, mocked_driver_vendor_passthru):
self.register_fake_conductors()
return_value = ('foo', 'sync')
mocked_driver_vendor_passthru.return_value = return_value
response = self.get_json(
'/drivers/%s/vendor_passthru/do_test' % self.d1)
self.assertEqual(return_value[0], response)
@mock.patch.object(rpcapi.ConductorAPI, 'driver_vendor_passthru')
def test_driver_vendor_passthru_delete(self, mock_driver_vendor_passthru):
self.register_fake_conductors()
return_value = (None, 'async')
mock_driver_vendor_passthru.return_value = return_value
response = self.delete(
'/drivers/%s/vendor_passthru/do_test' % self.d1)
self.assertEqual(202, response.status_int)
self.assertEqual(return_value[0], response.json)
def test_driver_vendor_passthru_driver_not_found(self):
# tests when given driver is not found
# e.g. get_topic_for_driver fails to find the driver
response = self.post_json(
'/drivers/%s/vendor_passthru/do_test' % self.d1,
{'test_key': 'test_value'},
expect_errors=True)
self.assertEqual(404, response.status_int)
def test_driver_vendor_passthru_method_not_found(self):
response = self.post_json(
'/drivers/%s/vendor_passthru' % self.d1,
{'test_key': 'test_value'},
expect_errors=True)
self.assertEqual(400, response.status_int)
error = json.loads(response.json['error_message'])
self.assertEqual('Missing argument: "method"',
error['faultstring'])
@mock.patch.object(rpcapi.ConductorAPI,
'get_driver_vendor_passthru_methods')
def test_driver_vendor_passthru_methods(self, get_methods_mock):
self.register_fake_conductors()
return_value = {'foo': 'bar'}
get_methods_mock.return_value = return_value
path = '/drivers/%s/vendor_passthru/methods' % self.d1
data = self.get_json(path)
self.assertEqual(return_value, data)
get_methods_mock.assert_called_once_with(mock.ANY, self.d1,
topic=mock.ANY)
# Now let's test the cache: Reset the mock
get_methods_mock.reset_mock()
# Call it again
data = self.get_json(path)
self.assertEqual(return_value, data)
# Assert RPC method wasn't called this time
self.assertFalse(get_methods_mock.called)
@mock.patch.object(rpcapi.ConductorAPI, 'get_driver_properties')
@mock.patch.object(rpcapi.ConductorAPI, 'get_topic_for_driver')
class TestDriverProperties(base.FunctionalTest):
def test_driver_properties_fake(self, mock_topic, mock_properties):
# Can get driver properties for fake driver.
driver._DRIVER_PROPERTIES = {}
driver_name = 'fake'
mock_topic.return_value = 'fake_topic'
mock_properties.return_value = {'prop1': 'Property 1. Required.'}
data = self.get_json('/drivers/%s/properties' % driver_name)
self.assertEqual(mock_properties.return_value, data)
mock_topic.assert_called_once_with(driver_name)
mock_properties.assert_called_once_with(mock.ANY, driver_name,
topic=mock_topic.return_value)
self.assertEqual(mock_properties.return_value,
driver._DRIVER_PROPERTIES[driver_name])
def test_driver_properties_cached(self, mock_topic, mock_properties):
# only one RPC-conductor call will be made and the info cached
# for subsequent requests
driver._DRIVER_PROPERTIES = {}
driver_name = 'fake'
mock_topic.return_value = 'fake_topic'
mock_properties.return_value = {'prop1': 'Property 1. Required.'}
data = self.get_json('/drivers/%s/properties' % driver_name)
data = self.get_json('/drivers/%s/properties' % driver_name)
data = self.get_json('/drivers/%s/properties' % driver_name)
self.assertEqual(mock_properties.return_value, data)
mock_topic.assert_called_once_with(driver_name)
mock_properties.assert_called_once_with(mock.ANY, driver_name,
topic=mock_topic.return_value)
self.assertEqual(mock_properties.return_value,
driver._DRIVER_PROPERTIES[driver_name])
def test_driver_properties_invalid_driver_name(self, mock_topic,
mock_properties):
# Cannot get driver properties for an invalid driver; no RPC topic
# exists for it.
driver._DRIVER_PROPERTIES = {}
driver_name = 'bad_driver'
mock_topic.side_effect = exception.DriverNotFound(
driver_name=driver_name)
mock_properties.return_value = {'prop1': 'Property 1. Required.'}
ret = self.get_json('/drivers/%s/properties' % driver_name,
expect_errors=True)
self.assertEqual(404, ret.status_int)
mock_topic.assert_called_once_with(driver_name)
self.assertFalse(mock_properties.called)
def test_driver_properties_cannot_load(self, mock_topic, mock_properties):
# Cannot get driver properties for the driver. Although an RPC topic
# exists for it, the conductor wasn't able to load it.
driver._DRIVER_PROPERTIES = {}
driver_name = 'driver'
mock_topic.return_value = 'driver_topic'
mock_properties.side_effect = exception.DriverNotFound(
driver_name=driver_name)
ret = self.get_json('/drivers/%s/properties' % driver_name,
expect_errors=True)
self.assertEqual(404, ret.status_int)
mock_topic.assert_called_once_with(driver_name)
mock_properties.assert_called_once_with(mock.ANY, driver_name,
topic=mock_topic.return_value)
import inspect
import json
import logging
from json import JSONDecodeError
from pathlib import Path
from typing import Optional, TextIO, Union, ChainMap, List, Tuple, Dict, TYPE_CHECKING
import asyncio
import itertools
import sys
import jsonschema
from lightbus.exceptions import (
InvalidApiForSchemaCreation,
InvalidSchema,
SchemaNotFound,
ValidationError,
    RemoteSchemasNotLoaded,
)
from lightbus.schema.encoder import json_encode
from lightbus.schema.hints_to_schema import (
make_response_schema,
make_rpc_parameter_schema,
make_event_parameter_schema,
)
from lightbus.transports.registry import SchemaTransportPoolType
from lightbus.utilities.io import make_file_safe_api_name
from lightbus.api import Api, Event
from lightbus.utilities.type_checks import is_optional
if TYPE_CHECKING:
# pylint: disable=unused-import,cyclic-import
from lightbus.transports.base import SchemaTransport
from lightbus.transports.pool import TransportPool
logger = logging.getLogger(__name__)
class Schema:
"""Represents the bus' schema
Note that the presence of a schema does not necessarily
indicate that a lightbus process is present or ready to serve
requests for the API. For that you will need to consume the events
produced by the state plugin.
That being said, you should expect old schemas to be dropped
after max_age_seconds.
"""
def __init__(
self,
schema_transport: "SchemaTransportPoolType",
max_age_seconds: Optional[int] = 60,
human_readable: bool = True,
):
self.schema_transport = schema_transport
self._schema_transport: Optional["SchemaTransport"] = None
self.max_age_seconds = max_age_seconds
self.human_readable = human_readable
# Schemas which have been provided locally. These will either be locally-available
# APIs, or schemas which have been loaded from local files
self.local_schemas = {}
# Schemas which have been retrieved from the bus. This will also contain local
# schemas which have been stored onto the bus. The storing and retrieving of
# remote schemas is mediated by the schema transport.
self._remote_schemas: Optional[Dict[str, dict]] = None
def __contains__(self, item):
return item in self.local_schemas or item in self.remote_schemas
async def add_api(self, api: "Api"):
"""Adds an API locally, and sends to the transport"""
schema = api_to_schema(api)
self.local_schemas[api.meta.name] = schema
await self.schema_transport.store(api.meta.name, schema, ttl_seconds=self.max_age_seconds)
def get_api_schema(self, api_name) -> Optional[dict]:
"""Get the schema for the given API"""
api_schema = self.local_schemas.get(api_name) or self.remote_schemas.get(api_name)
if not api_schema:
# TODO: Add link to docs in error message
raise SchemaNotFound(
"No schema could be found for API {}. You should ensure that either this API is"
" being served by another lightbus process, or you can load this schema manually."
.format(api_name)
)
return api_schema
def get_event_schema(self, api_name, event_name):
event_schemas = self.get_api_schema(api_name)["events"]
try:
return event_schemas[event_name]
except KeyError:
raise SchemaNotFound(
"Found schema for API '{}', but it did not contain an event named '{}'".format(
api_name, event_name
)
)
def get_rpc_schema(self, api_name, rpc_name):
rpc_schemas = self.get_api_schema(api_name)["rpcs"]
try:
return rpc_schemas[rpc_name]
except KeyError:
raise SchemaNotFound(
"Found schema for API '{}', but it did not contain a RPC named '{}'".format(
api_name, rpc_name
)
)
def get_event_or_rpc_schema(self, api_name, name):
try:
return self.get_event_schema(api_name, name)
except SchemaNotFound:
pass
try:
return self.get_rpc_schema(api_name, name)
except SchemaNotFound:
pass
# TODO: Add link to docs in error message
raise SchemaNotFound(
"No schema found for '{}' on API '{}'. You should either, a) ensure this "
"API is being served by another lightbus process, or b) load this schema manually."
"".format(name, api_name)
)
def validate_parameters(self, api_name, event_or_rpc_name, parameters):
"""Validate the parameters for the given event/rpc
        This will raise a lightbus `ValidationError` exception on error,
        or return None if valid.
"""
json_schema = self.get_event_or_rpc_schema(api_name, event_or_rpc_name)["parameters"]
try:
jsonschema.validate(parameters, json_schema)
except jsonschema.ValidationError as e:
logger.error(e)
path = list(e.absolute_path)
if not path:
raise ValidationError(
"Validation error when using JSON schema to validate parameters for \n"
f"{api_name}.{event_or_rpc_name}.\n"
"\n"
"It is likely you have included an unwanted parameter or omitted a required \n"
"parameter.\n"
"\n"
f"The error was: {e.message}\n"
"\n"
"The full validator error was logged above"
) from None
elif len(path) == 1:
raise ValidationError(
"Validation error when using JSON schema to validate parameters for \n"
f"{api_name}.{event_or_rpc_name}.\n"
"\n"
"It is likely that you have passed in an invalid value for the \n"
f"'{path[0]}' parameter.\n"
"\n"
f"The error given was: {e.message}\n"
"\n"
"The full validator error was logged above"
) from None
else:
raise ValidationError(
"Validation error when using JSON schema to validate parameters for \n"
f"{api_name}.{event_or_rpc_name}.\n"
"\n"
"This was an error in validating the internal structure of one \n"
"of the parameters' values. The path to this error is \n"
f"'<root>.{'.'.join(e.absolute_path)}'.\n"
"\n"
f"The error given was: {e.message}\n"
"\n"
"The full validator error was logged above"
) from None
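    # Usage sketch (hedged; 'my_api'/'my_event' are hypothetical names): given
    # a schema for my_api.my_event declaring a single integer `user_id`
    # parameter,
    #
    #     schema.validate_parameters("my_api", "my_event", {"user_id": "oops"})
    #
    # logs the jsonschema error and raises the ValidationError above.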
def validate_response(self, api_name, rpc_name, response):
"""Validate the parameters for the given event/rpc
This will raise an `jsonschema.ValidationError` exception on error,
or return None if valid.
Note that only RPCs have responses. Accessing this property for an
event will result in a SchemaNotFound error.
"""
json_schema = self.get_rpc_schema(api_name, rpc_name)["response"]
try:
jsonschema.validate(response, json_schema)
except jsonschema.ValidationError as e:
logger.error(e)
path = list(e.absolute_path)
if not path:
raise ValidationError(
"Validation error when using JSON schema to validate result from \n"
f"RPC {api_name}.{rpc_name}.\n"
"\n"
"It is likely the response was either of the incorrect type, or "
"some fields were erroneously absent/present.\n"
"\n"
f"The error was: {e.message}\n"
"\n"
"The full validator error was logged above"
) from None
else:
raise ValidationError(
"Validation error when using JSON schema to validate result from \n"
f"RPC {api_name}.{rpc_name}.\n"
"\n"
"This was an error in validating the internal structure of the \n"
"data returned values. The path to this error is \n"
f"'<root>.{'.'.join(e.absolute_path)}'.\n"
"\n"
f"The error given was: {e.message}\n"
"\n"
"The full validator error was logged above"
) from None
@property
def api_names(self) -> List[str]:
return list(set(itertools.chain(self.local_schemas.keys(), self.remote_schemas.keys())))
@property
def events(self) -> List[Tuple[str, str]]:
"""Get a list of all events available on the bus
Each event is a tuple in the form `(api_name, event_name)`
"""
events = []
for api_name in self.api_names:
api_schema = self.get_api_schema(api_name)
if api_schema:
for event_name in api_schema["events"].keys():
events.append((api_name, event_name))
return events
@property
def rpcs(self) -> List[Tuple[str, str]]:
"""Get a list of all RPCs available on the bus
Each rpc is a tuple in the form `(api_name, rpc_name)`
"""
rpcs = []
for api_name in self.api_names:
api_schema = self.get_api_schema(api_name)
if api_schema:
for event_name in api_schema["rpcs"].keys():
rpcs.append((api_name, event_name))
return rpcs
async def save_to_bus(self):
"""Save the schema onto the bus
This will be done using the `schema_transport` provided to `__init__()`
"""
for api_name, schema in self.local_schemas.items():
await self.schema_transport.store(api_name, schema, ttl_seconds=self.max_age_seconds)
async def load_from_bus(self):
"""Save the schema from the bus
This will be done using the `schema_transport` provided to `__init__()`
"""
self._remote_schemas = await self.schema_transport.load()
async def ensure_loaded_from_bus(self):
if self._remote_schemas is None:
await self.load_from_bus()
@property
def remote_schemas(self) -> Dict[str, Dict]:
"""Schemas which have been retrieved from the bus.
        This will also contain local schemas which have been stored onto the bus.
        The storing and retrieving of remote schemas is mediated by the schema transport.
The returned value is a dictionary where keys are fully qualified API names,
and the values are JSON schemas
"""
if self._remote_schemas is None:
raise RemoteSchemasNotLoaded(
"The remote schemas have not yet been loaded. Lightbus should have ensured this was"
" done already, and therefore this is likely a bug. However, calling"
" bus.client.lazy_load_now() should resolve this."
)
return self._remote_schemas
async def monitor(self, interval=None):
"""Monitor for remote schema changes and keep any local schemas alive on the bus"""
interval = interval or self.max_age_seconds * 0.8
try:
while True:
await asyncio.sleep(interval)
# Keep alive our local schemas
for api_name, schema in self.local_schemas.items():
await self.schema_transport.ping(
api_name, schema, ttl_seconds=self.max_age_seconds
)
# Read the entire schema back from the bus
await self.load_from_bus()
except asyncio.CancelledError:
return
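    # With the default max_age_seconds=60, the keep-alive interval above
    # works out to 60 * 0.8 = 48 seconds.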
def save_local(self, destination: Union[str, Path, TextIO] = None):
"""Save all present schemas to a local file
This will save both local & remote schemas to a local file
"""
if isinstance(destination, str):
destination = Path(destination)
if destination is None:
self._dump_to_file(sys.stdout)
sys.stdout.write("\n")
elif destination.is_dir():
self._dump_to_directory(destination)
else:
with destination.open("w", encoding="utf8") as f:
self._dump_to_file(f)
def load_local(self, source: Union[str, Path, TextIO] = None):
"""Load schemas from a local file
These files will be treated as local schemas, and will not be sent to the bus.
This can be useful for validation during development and testing.
"""
if isinstance(source, str):
source = Path(source)
def _load_schema(path, file_data):
try:
return json.loads(file_data)
except JSONDecodeError as e:
raise InvalidSchema("Could not parse schema file {}: {}".format(path, e.msg))
if source is None:
# No source, read from stdin
schema = _load_schema("[stdin]", sys.stdin.read())
elif hasattr(source, "is_dir") and source.is_dir():
# Read each json file in directory
schemas = []
for file_path in source.glob("*.json"):
schemas.append(_load_schema(file_path, file_path.read_text(encoding="utf8")))
schema = ChainMap(*schemas)
elif hasattr(source, "read"):
# Read file handle
schema = _load_schema(source.name, source.read())
elif hasattr(source, "read_text"):
# Read pathlib Path
schema = _load_schema(source.name, source.read_text())
else:
raise InvalidSchema(
"Did not recognise provided source as either a "
"directory path, file path, or file handle: {}".format(source)
)
for api_name, api_schema in schema.items():
self.local_schemas[api_name] = api_schema
return schema
def _dump_to_directory(self, destination: Path):
for api_name in self.api_names:
file_name = "{}.json".format(make_file_safe_api_name(api_name))
(destination / file_name).write_text(self._get_dump(api_name), encoding="utf8")
def _dump_to_file(self, f):
f.write(self._get_dump())
def _get_dump(self, api_name=None):
if api_name:
schema = {api_name: self.get_api_schema(api_name)}
else:
schema = {api_name: self.get_api_schema(api_name) for api_name in self.api_names}
indent = 2 if self.human_readable else None
return json_encode(schema, indent=indent)
async def close(self):
await self.schema_transport.close()
class Parameter(inspect.Parameter):
"""Describes the name and type of an event parameter"""
empty = inspect.Parameter.empty
def __init__(self, name, annotation=empty, *, default=empty):
super(Parameter, self).__init__(
name, inspect.Parameter.KEYWORD_ONLY, default=default, annotation=annotation
)
@property
def is_required(self):
return self.default is self.empty and not is_optional(self.annotation)
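# Example (sketch): Parameter("user_id", int) is keyword-only and required;
# Parameter("limit", Optional[int]) and Parameter("limit", int, default=10)
# are both optional, per is_required above.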
class WildcardParameter(inspect.Parameter):
"""Describes a **kwargs style parameter to an event"""
def __init__(self):
super(WildcardParameter, self).__init__(
name="kwargs", kind=inspect.Parameter.VAR_KEYWORD, default={}, annotation=dict
)
def api_to_schema(api: "lightbus.Api") -> dict:
"""Produce a lightbus schema for the given API"""
schema = {"rpcs": {}, "events": {}}
if isinstance(api, type):
raise InvalidApiForSchemaCreation(
"An attempt was made to derive an API schema from a type/class, rather than "
"from an instance of an API. This is probably because you are passing an API "
"class to api_to_schema(), rather than an instance of the API class."
)
for member_name, member in inspect.getmembers(api):
if member_name.startswith("_"):
# Don't create schema from private methods
continue
if hasattr(Api, member_name):
# Don't create schema for methods defined on Api class
continue
if inspect.ismethod(member):
schema["rpcs"][member_name] = {
"parameters": make_rpc_parameter_schema(api.meta.name, member_name, method=member),
"response": make_response_schema(api.meta.name, member_name, method=member),
}
elif isinstance(member, Event):
schema["events"][member_name] = {
"parameters": make_event_parameter_schema(api.meta.name, member_name, event=member)
}
return schema
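# Shape sketch (hedged; the member names are hypothetical): for an API
# instance exposing one RPC `check_password` and one Event `user_registered`,
# api_to_schema() returns roughly:
#
#     {
#         "rpcs": {"check_password": {"parameters": {...}, "response": {...}}},
#         "events": {"user_registered": {"parameters": {...}}},
#     }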
#!/usr/bin/env python
"""
dev.py [OPTIONS] [-- ARGS]
Run tests, building the project first with Meson
Examples::
$ python dev.py
$ python dev.py -s {SAMPLE_SUBMODULE}
$ python dev.py -t {SAMPLE_TEST}
$ python dev.py --ipython
$ python dev.py --python somescript.py
$ python dev.py --bench
$ python dev.py --no-build --bench signal.LTI
Run a debugger:
$ gdb --args python dev.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python dev.py --gcov [...other args...]
$ python dev.py --lcov-html
"""
#
# This is a generic test runner script for projects using NumPy's test
# framework. Change the following values to adapt to your project:
#
PROJECT_MODULE = "scipy"
PROJECT_ROOT_FILES = ['scipy', 'LICENSE.txt', 'meson.build']
SAMPLE_TEST = "scipy.fftpack.tests.test_real_transforms::TestIDSTIIIInt"
SAMPLE_SUBMODULE = "optimize"
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
'/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
from pathlib import Path
import platform
# the following multiprocessing import is necessary to prevent tests that use
# multiprocessing from hanging on >= Python3.8 (macOS) using pytest. Just the
# import is enough...
import multiprocessing
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
current_sys_path = sys.path.copy()
from argparse import ArgumentParser, REMAINDER
import shutil
import subprocess
import time
import datetime
import importlib.util
import json # noqa: E402
from sysconfig import get_path
try:
from types import ModuleType as new_module
except ImportError: # old Python
from imp import new_module
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def import_module_from_path(mod_name, mod_path):
"""Import module with name `mod_name` from file path `mod_path`"""
spec = importlib.util.spec_from_file_location(mod_name, mod_path)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod)
return mod
# Import runtests.py
runtests = import_module_from_path('runtests', Path(ROOT_DIR) / 'runtests.py')
# Reassign sys.path as it is changed by the `runtests` import above
sys.path = current_sys_path
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project (use system installed version)")
parser.add_argument("--werror", action="store_true", default=False,
help="Treat warnings as errors")
parser.add_argument("--build-only", "-b", action="store_true", default=False,
help="just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
parser.add_argument("--refguide-check", action="store_true", default=False,
help="Run refguide check (do not run regular tests.)")
parser.add_argument("--coverage", action="store_true", default=False,
help=("report coverage of project code. HTML output"
" goes under build/coverage"))
parser.add_argument("--gcov", action="store_true", default=False,
help=("enable C code coverage via gcov (requires GCC)."
" gcov output goes to build/**/*.gc*"))
parser.add_argument("--lcov-html", action="store_true", default=False,
help=("produce HTML for C code coverage information "
"from a previous run with --gcov. "
"HTML output goes to build/lcov/"))
parser.add_argument("--mode", "-m", default="fast",
help="'fast', 'full', or something that could be "
"passed to `pytest -m` as a marker expression "
"[default: fast]")
parser.add_argument("--submodule", "-s", default=None,
help="Submodule whose tests to run (cluster,"
" constants, ...)")
parser.add_argument("--pythonpath", "-p", default=None,
help="Paths to prepend to PYTHONPATH")
parser.add_argument("--tests", "-t", action='append',
help="Specify tests to run")
parser.add_argument("--python", action="store_true",
help="Start a Python shell with PYTHONPATH set")
parser.add_argument("--ipython", "-i", action="store_true",
help="Start IPython shell with PYTHONPATH set")
parser.add_argument("--shell", action="store_true",
help="Start Unix shell with PYTHONPATH set")
parser.add_argument("--debug", "-g", action="store_true",
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=1,
help="Number of parallel jobs for build and testing")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
help="Run benchmark suite instead of test suite")
parser.add_argument("--bench-compare", action="append", metavar="BEFORE",
help=("Compare benchmark results of current HEAD to"
" BEFORE. Use an additional "
"--bench-compare=COMMIT to override HEAD with"
" COMMIT. Note that you need to commit your "
"changes first!"
))
parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
help="Arguments to pass to Nose, Python or shell")
parser.add_argument("--pep8", action="store_true", default=False,
help="Perform pep8 check with flake8.")
parser.add_argument("--mypy", action="store_true", default=False,
help="Run mypy on the codebase")
parser.add_argument("--doc", action="append", nargs="?",
const="html-scipyorg", help="Build documentation")
parser.add_argument("--win-cp-openblas", action="store_true",
help="If set, and on Windows, copy OpenBLAS lib to "
"install directory after meson install. "
"Note: this argument may be removed in the future "
"once a `site.cfg`-like mechanism to select BLAS/LAPACK "
"libraries is implemented for Meson")
parser.add_argument("--build-dir", default="build",
help="Relative path to the build directory. "
"Default is 'build'")
parser.add_argument("--install-prefix", default=None,
help="Relative path to the install directory. "
"Default is <build-dir>-install.")
args = parser.parse_args(argv)
global PATH_INSTALLED
build_dir = Path(args.build_dir)
install_dir = args.install_prefix
if not install_dir:
install_dir = build_dir.parent / (build_dir.stem + "-install")
PATH_INSTALLED = os.path.join(
os.path.abspath(os.path.dirname(__file__)),
install_dir
)
if args.win_cp_openblas and platform.system() != 'Windows':
raise RuntimeError('--win-cp-openblas only has effect on Windows')
if args.pep8:
# Lint the source using the configuration in tox.ini.
os.system("flake8 scipy benchmarks/benchmarks")
# Lint just the diff since branching off of main using a
# stricter configuration.
lint_diff = os.path.join(ROOT_DIR, 'tools', 'lint_diff.py')
os.system(lint_diff)
sys.exit(0)
if args.mypy:
sys.exit(run_mypy(args))
if args.bench_compare:
args.bench = True
args.no_build = True # ASV does the building
if args.lcov_html:
# generate C code coverage output
runtests.lcov_generate()
sys.exit(0)
if args.pythonpath:
for p in reversed(args.pythonpath.split(os.pathsep)):
sys.path.insert(0, p)
if args.gcov:
runtests.gcov_reset_counters()
if args.debug and args.bench:
print("*** Benchmarks should not be run against debug version; "
"remove -g flag ***")
if not args.no_build:
site_dir = build_project(args)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = \
os.pathsep.join((site_dir, os.environ.get('PYTHONPATH', '')))
extra_argv = args.args[:]
if extra_argv and extra_argv[0] == '--':
extra_argv = extra_argv[1:]
if args.python:
if extra_argv:
# Don't use subprocess, since we don't want to include the
# current path in PYTHONPATH.
sys.argv = extra_argv
with open(extra_argv[0], 'r') as f:
script = f.read()
sys.modules['__main__'] = new_module('__main__')
ns = dict(__name__='__main__',
__file__=extra_argv[0])
exec(script, ns)
sys.exit(0)
else:
import code
code.interact()
sys.exit(0)
if args.ipython:
import IPython
IPython.embed(user_ns={})
sys.exit(0)
if args.shell:
shell = os.environ.get('SHELL', 'sh')
print("Spawning a Unix shell...")
os.execv(shell, [shell] + extra_argv)
sys.exit(1)
if args.doc:
cmd = ["make", "-Cdoc", 'PYTHON="{}"'.format(sys.executable)]
cmd += args.doc
if args.parallel:
cmd.append('SPHINXOPTS="-j{}"'.format(args.parallel))
subprocess.run(cmd, check=True)
sys.exit(0)
if args.coverage:
dst_dir = os.path.join(ROOT_DIR, args.build_dir, 'coverage')
fn = os.path.join(dst_dir, 'coverage_html.js')
if os.path.isdir(dst_dir) and os.path.isfile(fn):
shutil.rmtree(dst_dir)
extra_argv += ['--cov-report=html:' + dst_dir]
if args.refguide_check:
cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'),
'--doctests']
if args.submodule:
cmd += [args.submodule]
os.execv(sys.executable, [sys.executable] + cmd)
sys.exit(0)
if args.bench:
# Run ASV
items = extra_argv
if args.tests:
items += args.tests
if args.submodule:
items += [args.submodule]
bench_args = []
for a in items:
bench_args.extend(['--bench', a])
if not args.bench_compare:
import scipy
print("Running benchmarks for Scipy version %s at %s"
% (scipy.__version__, scipy.__file__))
cmd = ['asv', 'run', '--dry-run', '--show-stderr',
'--python=same'] + bench_args
retval = runtests.run_asv(cmd)
sys.exit(retval)
else:
if len(args.bench_compare) == 1:
commit_a = args.bench_compare[0]
commit_b = 'HEAD'
elif len(args.bench_compare) == 2:
commit_a, commit_b = args.bench_compare
else:
p.error("Too many commits to compare benchmarks for")
# Check for uncommitted files
if commit_b == 'HEAD':
r1 = subprocess.call(['git', 'diff-index', '--quiet',
'--cached', 'HEAD'])
r2 = subprocess.call(['git', 'diff-files', '--quiet'])
if r1 != 0 or r2 != 0:
print("*"*80)
print("WARNING: you have uncommitted changes --- "
"these will NOT be benchmarked!")
print("*"*80)
# Fix commit ids (HEAD is local to current repo)
p = subprocess.Popen(['git', 'rev-parse', commit_b],
stdout=subprocess.PIPE)
out, err = p.communicate()
commit_b = out.strip()
p = subprocess.Popen(['git', 'rev-parse', commit_a],
stdout=subprocess.PIPE)
out, err = p.communicate()
commit_a = out.strip()
cmd = ['asv', 'continuous', '--show-stderr', '--factor', '1.05',
commit_a, commit_b] + bench_args
runtests.run_asv(cmd)
sys.exit(1)
if args.build_only:
sys.exit(0)
else:
try:
__import__(PROJECT_MODULE)
test = sys.modules[PROJECT_MODULE].test
version = sys.modules[PROJECT_MODULE].__version__
mod_path = sys.modules[PROJECT_MODULE].__file__
mod_path = os.path.abspath(os.path.join(os.path.dirname(mod_path)))
except ImportError:
current_python_path = os.environ.get('PYTHONPATH', None)
print("Unable to import {} from: {}".format(PROJECT_MODULE,
current_python_path))
site_dir = get_site_packages()
print("Trying to import scipy from development installed path at:",
site_dir)
sys.path.insert(0, site_dir)
os.environ['PYTHONPATH'] = \
os.pathsep.join((site_dir, os.environ.get('PYTHONPATH', '')))
__import__(PROJECT_MODULE)
test = sys.modules[PROJECT_MODULE].test
version = sys.modules[PROJECT_MODULE].__version__
mod_path = sys.modules[PROJECT_MODULE].__file__
mod_path = os.path.abspath(os.path.join(os.path.dirname(mod_path)))
if args.submodule:
tests = [PROJECT_MODULE + "." + args.submodule]
elif args.tests:
tests = args.tests
else:
tests = None
# Run the tests
if not args.no_build:
test_dir = site_dir
else:
test_dir = os.path.join(ROOT_DIR, args.build_dir, 'test')
if not os.path.isdir(test_dir):
os.makedirs(test_dir)
shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
os.path.join(test_dir, '.coveragerc'))
cwd = os.getcwd()
try:
os.chdir(test_dir)
print("Running tests for {} version:{}, installed at:{}".format(
PROJECT_MODULE, version, mod_path))
result = test(args.mode,
verbose=args.verbose,
extra_argv=extra_argv,
doctests=args.doctests,
coverage=args.coverage,
tests=tests,
parallel=args.parallel)
finally:
os.chdir(cwd)
if isinstance(result, bool):
sys.exit(0 if result else 1)
elif result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
def setup_build(args, env):
"""
Set up the meson build.
"""
cmd = ["meson", "setup", args.build_dir, "--prefix", PATH_INSTALLED]
build_dir = Path(args.build_dir)
run_dir = os.getcwd()
if build_dir.exists() and not (build_dir / 'meson-info').exists():
if list(build_dir.iterdir()):
raise RuntimeError("Can't build into non-empty directory "
f"'{build_dir.absolute()}'")
if os.path.exists(build_dir):
build_options_file = (build_dir / "meson-info"
/ "intro-buildoptions.json")
with open(build_options_file) as f:
build_options = json.load(f)
installdir = None
for option in build_options:
if option["name"] == "prefix":
installdir = option["value"]
break
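# Reconfigure in place when the existing build tree was set up with a
# different install prefix than the one requested now.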
if installdir != PATH_INSTALLED:
run_dir = os.path.join(run_dir, build_dir)
cmd = ["meson", "--reconfigure", "--prefix", PATH_INSTALLED]
else:
return
if args.werror:
cmd += ["--werror"]
# Setting up meson build
ret = subprocess.call(cmd, env=env, cwd=run_dir)
if ret == 0:
print("Meson build setup OK")
else:
print("Meson build setup failed! ({0} elapsed)")
sys.exit(1)
return
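# A typical command assembled above (paths are illustrative):
#   meson setup build --prefix /path/to/repo/build-install
# or, when reconfiguring an existing build directory:
#   meson --reconfigure --prefix /path/to/repo/build-install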
def install_project(args):
"""
Installs the project after building.
"""
if os.path.exists(PATH_INSTALLED):
installdir = get_site_packages()
non_empty = len(os.listdir(PATH_INSTALLED))
if non_empty and not os.path.exists(installdir):
raise RuntimeError("Can't install in non-empty directory: "
f"'{PATH_INSTALLED}'")
cmd = ["meson", "install", "-C", args.build_dir]
log_filename = os.path.join(ROOT_DIR, 'meson-install.log')
start_time = datetime.datetime.now()
if args.show_build_log:
ret = subprocess.call(cmd, cwd=ROOT_DIR)
else:
print("Installing, see meson-install.log...")
with open(log_filename, 'w') as log:
p = subprocess.Popen(cmd, stdout=log, stderr=log,
cwd=ROOT_DIR)
try:
# Wait for it to finish, and print something to indicate the
# process is alive, but only if the log file has grown (to
# allow continuous integration environments to kill a hanging
# process if it produces no output)
last_blip = time.time()
last_log_size = os.stat(log_filename).st_size
while p.poll() is None:
time.sleep(0.5)
if time.time() - last_blip > 60:
log_size = os.stat(log_filename).st_size
if log_size > last_log_size:
elapsed = datetime.datetime.now() - start_time
print(" ... installation in progress ({0} "
"elapsed)".format(elapsed))
last_blip = time.time()
last_log_size = log_size
ret = p.wait()
except: # noqa: E722
p.terminate()
raise
elapsed = datetime.datetime.now() - start_time
if ret != 0:
if not args.show_build_log:
with open(log_filename, 'r') as f:
print(f.read())
print("Installation failed! ({0} elapsed)".format(elapsed))
sys.exit(1)
# ignore everything in the install directory.
with open(Path(PATH_INSTALLED) / ".gitignore", "w") as f:
f.write("*")
print("Installation OK")
return
def copy_openblas():
"""
Copies the OpenBLAS DLLs to the SciPy install dir, and also overwrites
the default `_distributor_init.py` file with the one we use for wheels
uploaded to PyPI, so that the DLLs get loaded.
Assumes pkg-config is installed and aware of OpenBLAS.
"""
# Get OpenBLAS lib path from pkg-config
cmd = ['pkg-config', '--variable', 'libdir', 'openblas']
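# For a typical OpenBLAS install this prints the library directory,
# e.g. '/opt/openblas/lib' (path is illustrative).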
result = subprocess.run(cmd, capture_output=True, text=True)
if result.returncode != 0:
print(result.stderr)
return result.returncode
openblas_lib_path = Path(result.stdout.strip())
if openblas_lib_path.stem != 'lib':
raise RuntimeError(f'Expecting "lib" at end of "{openblas_lib_path}"')
# Look in bin subdirectory for OpenBLAS binaries.
bin_path = openblas_lib_path.parent / 'bin'
# Locate the SciPy install directory and create the output .libs subdir.
scipy_path = Path(get_site_packages()) / 'scipy'
libs_path = scipy_path / '.libs'
libs_path.mkdir(exist_ok=True)
# Copy DLL files from OpenBLAS install to scipy install .libs subdir.
for dll_fn in bin_path.glob('*.dll'):
out_fname = libs_path / dll_fn.parts[-1]
print(f'Copying {dll_fn} to {out_fname}')
out_fname.write_bytes(dll_fn.read_bytes())
# Write _distributor_init.py to scipy install dir; this ensures the .libs
# file is on the DLL search path at run-time, so OpenBLAS gets found
openblas_support = import_module_from_path(
'openblas_support',
Path(ROOT_DIR) / 'tools' / 'openblas_support.py')
openblas_support.make_init(scipy_path)
return 0
def get_site_packages():
plat_path = Path(get_path('platlib'))
return str(Path(PATH_INSTALLED) / plat_path.relative_to(sys.exec_prefix))
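# Sketch of the path computed above, assuming a common Linux CPython
# layout (the platlib tail is platform- and version-dependent):
#   get_path('platlib')          -> <exec_prefix>/lib/python3.X/site-packages
#   relative_to(sys.exec_prefix) -> lib/python3.X/site-packages
#   result                       -> <PATH_INSTALLED>/lib/python3.X/site-packages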
def build_project(args):
"""
Build a dev version of the project.
Returns
-------
site_dir
Directory where the built SciPy version was installed. This is a custom
prefix, followed by a relative path matching the one the system would
use for the site-packages of the active Python interpreter.
"""
root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
for fn in PROJECT_ROOT_FILES]
if not all(root_ok):
print("To build the project, run dev.py in "
"git checkout or unpacked source")
sys.exit(1)
env = dict(os.environ)
if args.debug or args.gcov:
# assume everyone uses gcc/gfortran
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
if args.gcov:
from sysconfig import get_config_vars
cvars = get_config_vars()
env['OPT'] = '-O0 -ggdb'
env['FOPT'] = '-O0 -ggdb'
env['CC'] = env.get('CC', cvars['CC']) + ' --coverage'
env['CXX'] = env.get('CXX', cvars['CXX']) + ' --coverage'
env['F77'] = 'gfortran --coverage '
env['F90'] = 'gfortran --coverage '
env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) +\
' --coverage'
setup_build(args, env)
site_dir = get_site_packages()
cmd = ["ninja", "-C", args.build_dir]
if args.parallel > 1:
cmd += ["-j", str(args.parallel)]
# Building with ninja-backend
ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
if ret == 0:
print("Build OK")
else:
print("Build failed!")
sys.exit(1)
install_project(args)
if args.win_cp_openblas and platform.system() == 'Windows':
if copy_openblas() == 0:
print('OpenBLAS copied')
else:
print("OpenBLAS copy failed!")
sys.exit(1)
return site_dir
def run_mypy(args):
if args.no_build:
raise ValueError('Cannot run mypy with --no-build')
try:
import mypy.api
except ImportError as e:
raise RuntimeError(
"Mypy not found. Please install it by running "
"pip install -r mypy_requirements.txt from the repo root"
) from e
site_dir = build_project(args)
config = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"mypy.ini",
)
with runtests.working_dir(site_dir):
# By default mypy won't color the output since it isn't being
# invoked from a tty.
os.environ['MYPY_FORCE_COLOR'] = '1'
# Change to the site directory to make sure mypy doesn't pick
# up any type stubs in the source tree.
report, errors, status = mypy.api.run([
"--config-file",
config,
PROJECT_MODULE,
])
print(report, end='')
print(errors, end='', file=sys.stderr)
return status
if __name__ == "__main__":
main(argv=sys.argv[1:])
| |
'''
description: Create a db entry for a VOEvent
license: APACHE 2.0
author: Ronald van Haren, NLeSC (r.vanharen@esciencecenter.nl)
'''
import voeventparse as vp
from pyfrbcatdb import dbase
from pyfrbcatdb.FRBCat import FRBCat_add
from pyfrbcatdb.FRBCat import parse_mapping
from pyfrbcatdb.logger import logger
from dateutil import parser
from astropy import units as u
from astropy.coordinates import SkyCoord
class decode_VOEvent(logger):
'''
Class to decode a VOEvent file and insert it into the
FRBCat database.
:param voevent: filestream or filename
:param dbName: database name
:param dbHost: database host
:param dbPort: database port
:param dbUser: database user name
:param dbPassword: database user password
:param logfile: name of log file
:type voevent: _io.BufferedReader, str
:type dbName: str
:type dbHost: str, NoneType
:type dbPort: str, NoneType
:type dbUser: str, NoneType
:type dbPassword: str, NoneType
:type logfile: str
'''
def __init__(self, voevent, dbName, dbHost, dbPort, dbUser,
dbPassword, logfile):
logger.__init__(self, logfile)
self.dbName = dbName
self.dbHost = dbHost
self.dbPort = dbPort
self.dbUser = dbUser
self.dbPassword = dbPassword
self.process_VOEvent(voevent)
def process_VOEvent(self, voevent):
'''
Main method to process the VOEvent.
:param voevent: filestream or filename
:type voevent: _io.BufferedReader, str
'''
try:
self.logger.info("Processing file {}".format(voevent.name))
except AttributeError:
self.logger.info("Processing file {}".format(voevent))
# load mapping VOEvent -> FRBCAT
mapping = parse_mapping()
# parse VOEvent xml file
vo_dict, event_type = self.parse_VOEvent(voevent, mapping)
# create/delete a new FRBCat entry
self.update_FRBCat(vo_dict, event_type)
try:
self.logger.info("Finished file {}".format(voevent.name))
except AttributeError:
self.logger.info("Finished file {}".format(voevent))
@staticmethod
def get_param(param_data, param_group, param_name):
'''
Get param data for a given attribute.
:param param_data: all param data from VOEvent file
:param param_group: param group in VOEvent which holds param_name
:param param_name: name of parameter to get value for
:type param_data: orderedmultidict.orderedmultidict.omdict
:type param_group: str
:type param_name: str
:returns: param value if defined in VOEvent, else None
:rtype: str, float, int, NoneType
'''
try:
# return value of the param if defined in the XML
return param_data.get(param_group).get(param_name).get('value')
except AttributeError:
# return None for the ones that are not defined in the XML
return None
@staticmethod
def get_description(v, item):
'''
Return description of parameter.
:param v: VOEvent xml
:param item: single dictionary item from mapping
:type v: lxml.objectify.ObjectifiedElement
:type item: dict
:returns: description of the parameter if applicable, else None
:rtype: str, NoneType
'''
param_group = item.get('param_group')
param_name = item.get('param_name')
try:
note = v.find(".//Group[@name='{}']".format(param_group)).find(
".//Param[@name='{}']".format(param_name)).Description
if note:
return "[{}] {}".format(param_name, note)
else:
return None
except AttributeError:
return None
@staticmethod
def get_coord(v, coordname):
'''
Get coordinate from VOEvent file.
- transform to HH:MM:SS if coordname=ra
- transform to DD:MM:SS if coordname=dec
:param v: VOEvent xml
:param coordname: coordinate name ('ra' or 'dec')
:type v: lxml.objectify.ObjectifiedElement
:type coordname: str
:returns: location string in HH:MM:SS.MS for coordname=ra
or DD:MM:SS.MS for coordname=dec
:rtype: str
'''
try:
units = getattr(vp.get_event_position(v, index=0), 'units')
except AttributeError:
return None
if units != 'deg':
raise AttributeError(
'Unable to determine units for position: {}'.format(
vp.get_event_position(v, index=0)))
position = vp.get_event_position(v, index=0)
if (position.system == 'UTC-FK5-GEO'):
skcoord = SkyCoord(ra=position.ra*u.degree,
dec=position.dec*u.degree, frame='fk5')
else:
# use default reference frame
skcoord = SkyCoord(ra=position.ra*u.degree,
dec=position.dec*u.degree)
if (coordname == 'ra'):
# ra location is in hms
coordloc = skcoord.ra.hms
elif (coordname == 'dec'):
# dec location is in dms
coordloc = skcoord.dec.dms
# format location tuple to string
locstring = '{}:{}:{}'.format(
str(int(round(coordloc[0]))).zfill(2),
str(abs(int(round(coordloc[1])))).zfill(2),
"{:.2f}".format(abs(coordloc[2])).zfill(5))
return locstring
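# Illustrative values: ra=187.70593 deg  -> '12:30:49.42' (hours),
#                      dec=12.39112 deg  -> '12:23:28.03' (degrees).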
@staticmethod
def get_attrib(v, attribname):
'''
Get xml attributes.
:param v: VOEvent xml
:param attribname: attribute name
:type v: lxml.objectify.ObjectifiedElement
:type attribname: str
:returns: v.attrib[attribname]
:rtype: str
'''
try:
return v.attrib[attribname]
except ValueError:
return None
except KeyError:
return None
@staticmethod
def get_utc_time_str(v):
'''
Get time in UTC.
:param v: VOEvent xml
:type v: lxml.objectify.ObjectifiedElement
:returns: time as string 'YYYY-MM-DD HH:MM:SS.MS'
:rtype: str
'''
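# strftime's %f yields microseconds; dropping the last three digits
# truncates the timestamp to millisecond precision.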
utctime = vp.get_event_time_as_utc(v, index=0)
return utctime.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
def get_value(self, v, param_data, item, event_type):
'''
Extract the value of item from VOEvent.
:param v: VOEvent xml
:param param_data: all param data from VOEvent file
:param item: single dictionary item from mapping
:param event_type: event type of VOEvent, including
citation if applicable, e.g. ('new', None)
:type v: lxml.objectify.ObjectifiedElement, str
:type param_data: orderedmultidict.orderedmultidict.omdict
:type item: dict
:type event_type: tuple
:returns: value for item
:rtype: int, float, str, bool, NoneType
'''
itemtype = item.get('type')
if itemtype == 'ivorn':
if (event_type[0] == 'supersedes'):
if event_type[1]:
# type supersedes with a valid ivorn citation
return event_type[1]
else:
# type supersedes with no ivorn citation, use event ivorn
return self.get_attrib(v, item.get('name'))
else:
return self.get_attrib(v, item.get('name'))
elif itemtype == 'Param':
return self.get_param(param_data, item.get('param_group'),
item.get('param_name'))
elif itemtype == 'ISOTime':
try:
return self.get_utc_time_str(v)
except AttributeError:
# for type 'retraction' there is no time defined
return None
elif itemtype == 'authortime':
try:
timestr = v.xpath('.//' +
item.get('voevent').replace('.', '/'))[0]
return parser.parse(str(timestr)).strftime('%Y-%m-%d %H:%M:%S')
except IndexError:
return None
elif itemtype == 'XML':
return vp.dumps(v)
elif itemtype == 'voevent':
try:
return v.xpath('.//' +
item.get('voevent').replace('.', '/'))[0]
except IndexError:
return None
elif itemtype == 'Coord':
return self.get_coord(v, item.get('name'))
elif itemtype == 'verify':
# get importance attribute from <Why> section
importance = v.Why.attrib.get(item.get('name'))
# for high importance set verified=True, else False
try:
if (float(importance) >= 0.95):
# high importance, so default to verified
return True
else:
return False
except TypeError:
return False
else:
return None
def parse_VOEvent(self, voevent, mapping):
'''
Parse VOEvent xml file.
:param voevent: VOEvent xml file
:param mapping: mapping from mapping.json
:type voevent: lxml.objectify.ObjectifiedElement, str
:type mapping: dict
:returns: mapping (mapping from mapping.json with values filled),
event_type (event_type and citation if applicable)
:rtype: dict, tuple
'''
# load VOEvent xml file
try:
v = vp.load(voevent)
except AttributeError:
with open(voevent, "rb") as f:
v = vp.load(f)
# assert that the xml file is a valid VOEvent
vp.assert_valid_as_v2_0(v)
# Check if the event is a new VOEvent
# For a new VOEvent there should be no citations
try:
event_type = (v.xpath('Citations')[0].EventIVORN.attrib['cite'],
v.xpath('Citations')[0].EventIVORN.text)
except IndexError:
event_type = ('new', None)
self.logger.info("Event of of type: {}".format(event_type))
# use the mapping to get required data from VOEvent xml
# if a path is not found in the xml it gets an empty list which is
# removed in the next step
# puts all params into dict param_data[group][param_name]
try:
param_data = vp.get_grouped_params(v)
except AttributeError:
# <What> section is not needed for retractions
param_data = None
for table in mapping.keys(): # iterate over all tables
for idx, item in enumerate(mapping[table]):
# Add values from XML to dictionary
mapping[table][idx]['value'] = self.get_value(v, param_data,
item, event_type)
if item.get('description'):
note = self.get_description(v, item)
if note:
mapping[table][idx]['note'] = note
return mapping, event_type
def update_FRBCat(self, mapping, event_type):
'''
Add new FRBCat entry. Calls the FRBCat_add class.
:param mapping: mapping from mapping.json
:param event_type: event_type and citation if applicable
:type mapping: dict
:type event_type: tuple
'''
# connect to database
connection, cursor = dbase.connectToDB(self.dbName,
self.dbUser,
self.dbPassword,
self.dbHost,
self.dbPort)
FRBCat = FRBCat_add(connection, cursor, mapping, event_type[0])
if event_type[0] in ['new', 'followup', 'supersedes']:
# for new, followup, supersedes we need to add an entry to FRBCat
FRBCat.add_VOEvent_to_FRBCat()
elif event_type[0] in ['retraction']:
# retract the event
FRBCat.retract(event_type[1])
| |
from __future__ import division
import types
import numpy as np
import scipy.integrate as integrate
from pgmpy.factors.base import BaseFactor
from pgmpy.factors.distributions import GaussianDistribution, CustomDistribution
class ContinuousFactor(BaseFactor):
"""
Base class for factors representing various multivariate
representations.
"""
def __init__(self, variables, pdf, *args, **kwargs):
"""
Parameters
----------
variables: list or array-like
The variables for which the distribution is defined.
pdf: function
The probability density function of the distribution.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
# Two variable dirichlet distribution with alpha = (1,2)
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.scope()
['x', 'y']
>>> dirichlet_factor.assignment(5,6)
226800.0
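A Gaussian factor can also be constructed by passing pdf='gaussian'
with mean and covariance keyword arguments (a sketch based on the
constructor logic below; array shapes must match the variables):
>>> gauss_factor = ContinuousFactor(['x', 'y'], 'gaussian',
...                                 mean=[0, 0],
...                                 covariance=[[1, 0], [0, 1]])
>>> gauss_factor.scope()
['x', 'y']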
"""
if not isinstance(variables, (list, tuple, np.ndarray)):
raise TypeError("variables: Expected type list or array-like, "
"got type {var_type}".format(var_type=type(variables)))
if len(set(variables)) != len(variables):
raise ValueError("Variable names cannot be same.")
variables = list(variables)
if isinstance(pdf, str):
if pdf == 'gaussian':
self.distribution = GaussianDistribution(
variables=variables,
mean=kwargs['mean'],
covariance=kwargs['covariance'])
else:
raise NotImplementedError("{dist} distribution not supported.",
"Please use CustomDistribution".
format(dist=pdf))
elif isinstance(pdf, CustomDistribution):
self.distribution = pdf
elif callable(pdf):
self.distribution = CustomDistribution(
variables=variables,
distribution=pdf)
else:
raise ValueError("pdf: Expected type: str or function, ",
"Got: {instance}".format(instance=type(variables)))
@property
def pdf(self):
"""
Returns the pdf of the ContinuousFactor.
"""
return self.distribution.pdf
@property
def variable(self):
return self.scope()[0]
def scope(self):
"""
Returns the scope of the factor.
Returns
-------
list: List of variable names in the scope of the factor.
Examples
--------
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from scipy.stats import multivariate_normal
>>> normal_pdf = lambda x1, x2: multivariate_normal.pdf((x1, x2), [0, 0], [[1, 0], [0, 1]])
>>> phi = ContinuousFactor(['x1', 'x2'], normal_pdf)
>>> phi.scope()
['x1', 'x2']
"""
return self.distribution.variables
def get_evidence(self):
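"""
Returns the evidence variables of the factor, i.e. every variable
in the scope except the first.
"""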
return self.scope()[1:]
def assignment(self, *args):
"""
Returns a list of pdf assignments for the corresponding values.
Parameters
----------
*args: values
Values whose assignment is to be computed.
Examples
--------
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from scipy.stats import multivariate_normal
>>> normal_pdf = lambda x1, x2: multivariate_normal.pdf((x1, x2), [0, 0], [[1, 0], [0, 1]])
>>> phi = ContinuousFactor(['x1', 'x2'], normal_pdf)
>>> phi.assignment(1, 2)
0.013064233284684921
"""
return self.distribution.assignment(*args)
def copy(self):
"""
Return a copy of the distribution.
Returns
-------
ContinuousFactor object: copy of the distribution
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
# Two variable dirichlet distribution with alpha = (1,2)
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.variables
['x', 'y']
>>> copy_factor = dirichlet_factor.copy()
>>> copy_factor.variables
['x', 'y']
"""
return ContinuousFactor(self.scope(), self.distribution.copy())
def discretize(self, method, *args, **kwargs):
"""
Discretizes the continuous distribution into discrete
probability masses using various methods.
Parameters
----------
method : A Discretizer Class from pgmpy.discretize
*args, **kwargs:
The parameters to be given to the Discretizer Class.
Returns
-------
An n-D array or a DiscreteFactor object according to the discretization
method used.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from pgmpy.factors.continuous import RoundingDiscretizer
>>> def dirichlet_pdf(x, y):
... return (np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> dirichlet_factor = ContinuousFactor(['x', 'y'], dirichlet_pdf)
>>> dirichlet_factor.discretize(RoundingDiscretizer, low=1, high=2, cardinality=5)
# TODO: finish this
"""
return method(self, *args, **kwargs).get_discrete_values()
def reduce(self, values, inplace=True):
"""
Reduces the factor to the context of the given variable values.
Parameters
----------
values: list, array-like
A list of tuples of the form (variable_name, variable_value).
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new ContinuousFactor object.
Returns
-------
ContinuousFactor or None: if inplace=True (default) returns None
if inplace=False returns a new ContinuousFactor instance.
Examples
--------
>>> import numpy as np
>>> from scipy.special import beta
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> def custom_pdf(x, y, z):
... return z*(np.power(x, 1) * np.power(y, 2)) / beta(x, y)
>>> custom_factor = ContinuousFactor(['x', 'y', 'z'], custom_pdf)
>>> custom_factor.variables
['x', 'y', 'z']
>>> custom_factor.assignment(1, 2, 3)
24.0
>>> custom_factor.reduce([('y', 2)])
>>> custom_factor.variables
['x', 'z']
>>> custom_factor.assignment(1, 3)
24.0
"""
phi = self if inplace else self.copy()
phi.distribution = phi.distribution.reduce(values, inplace=False)
if not inplace:
return phi
def marginalize(self, variables, inplace=True):
"""
Marginalize the factor with respect to the given variables.
Parameters
----------
variables: list, array-like
List of variables with respect to which the factor is to be marginalized.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new ContinuousFactor instance.
Returns
-------
ContinuousFactor or None: if inplace=True (default) returns None
if inplace=False returns a new ContinuousFactor instance.
Examples
--------
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from scipy.stats import multivariate_normal
>>> std_normal_pdf = lambda *x: multivariate_normal.pdf(x, [0, 0], [[1, 0], [0, 1]])
>>> std_normal = ContinuousFactor(['x1', 'x2'], std_normal_pdf)
>>> std_normal.scope()
['x1', 'x2']
>>> std_normal.assignment([1, 1])
0.058549831524319168
>>> std_normal.marginalize(['x2'])
>>> std_normal.scope()
['x1']
>>> std_normal.assignment(1)
0.24197072451914337
"""
phi = self if inplace else self.copy()
phi.distribution = phi.distribution.marginalize(variables, inplace=False)
if not inplace:
return phi
def normalize(self, inplace=True):
"""
Normalizes the pdf of the continuous factor so that it integrates to
1 over all the variables.
Parameters
----------
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
ContinuousFactor or None:
if inplace=True (default) returns None
if inplace=False returns a new ContinuousFactor instance.
Examples
--------
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from scipy.stats import multivariate_normal
>>> std_normal_pdf = lambda *x: 2 * multivariate_normal.pdf(x, [0, 0], [[1, 0], [0, 1]])
>>> std_normal = ContinuousFactor(['x1', 'x2'], std_normal_pdf)
>>> std_normal.assignment(1, 1)
0.117099663049
>>> std_normal.normalize()
>>> std_normal.assignment(1, 1)
0.0585498315243
"""
phi = self if inplace else self.copy()
phi.distribution = phi.distribution.normalize(inplace=False)
if not inplace:
return phi
def is_valid_cpd(self):
return self.distribution.is_valid_cpd()
def _operate(self, other, operation, inplace=True):
"""
Gives the ContinuousFactor operation (product or divide) with
the other factor.
Parameters
----------
other: ContinuousFactor
The ContinuousFactor to be multiplied.
operation: String
'product' for multiplication operation and 'divide' for
division operation.
inplace: boolean
If inplace=True it will modify the factor itself, else would return
a new factor.
Returns
-------
ContinuousFactor or None:
if inplace=True (default) returns None
if inplace=False returns a new `ContinuousFactor` instance.
"""
if not isinstance(other, ContinuousFactor):
raise TypeError("ContinuousFactor objects can only be multiplied ",
"or divided with another ContinuousFactor object. ",
"Got {other_type}, expected: ContinuousFactor.".format(
other_type=type(other)))
phi = self if inplace else self.copy()
phi.distribution = phi.distribution._operate(
other=other.distribution, operation=operation, inplace=False)
if not inplace:
return phi
def product(self, other, inplace=True):
"""
Gives the ContinuousFactor product with the other factor.
Parameters
----------
other: ContinuousFactor
The ContinuousFactor to be multiplied.
Returns
-------
ContinuousFactor or None:
if inplace=True (default) returns None
if inplace=False returns a new `ContinuousFactor` instance.
Example
-------
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from scipy.stats import multivariate_normal
>>> sn_pdf1 = lambda x: multivariate_normal.pdf([x], [0], [[1]])
>>> sn_pdf2 = lambda x1,x2: multivariate_normal.pdf([x1, x2], [0, 0], [[1, 0], [0, 1]])
>>> sn1 = ContinuousFactor(['x2'], sn_pdf1)
>>> sn2 = ContinuousFactor(['x1', 'x2'], sn_pdf2)
>>> sn3 = sn1.product(sn2, inplace=False)
>>> sn3.assignment(0, 0)
0.063493635934240983
>>> sn3 = sn1 * sn2
>>> sn3.assignment(0, 0)
0.063493635934240983
"""
return self._operate(other, 'product', inplace)
def divide(self, other, inplace=True):
"""
Divides the ContinuousFactor by the other factor.
Parameters
----------
other: ContinuousFactor
The ContinuousFactor to divide by.
Returns
-------
ContinuousFactor or None:
if inplace=True (default) returns None
if inplace=False returns a new `ContinuousFactor` instance.
Example
-------
>>> from pgmpy.factors.continuous import ContinuousFactor
>>> from scipy.stats import multivariate_normal
>>> sn_pdf1 = lambda x: multivariate_normal.pdf([x], [0], [[1]])
>>> sn_pdf2 = lambda x1,x2: multivariate_normal.pdf([x1, x2], [0, 0], [[1, 0], [0, 1]])
>>> sn1 = ContinuousFactor(['x2'], sn_pdf1)
>>> sn2 = ContinuousFactor(['x1', 'x2'], sn_pdf2)
>>> sn4 = sn2.divide(sn1, inplace=False)
>>> sn4.assignment(0, 0)
0.3989422804014327
>>> sn4 = sn2 / sn1
>>> sn4.assignment(0, 0)
0.3989422804014327
"""
if set(other.scope()) - set(self.scope()):
raise ValueError("Scope of divisor should be a subset of dividend")
return self._operate(other, 'divide', inplace)
def __mul__(self, other):
return self.product(other, inplace=False)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self.divide(other, inplace=False)
__div__ = __truediv__
| |
# Copyright 2013 OpenStack Foundation
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import migrate
from migrate import exceptions
from oslo_config import cfg
from oslo_db.sqlalchemy import migration
from oslo_serialization import jsonutils
from oslo_utils import importutils
import six
import sqlalchemy
from keystone.common import sql
from keystone.common.sql import migrate_repo
from keystone import contrib
from keystone import exception
from keystone.i18n import _
CONF = cfg.CONF
DEFAULT_EXTENSIONS = ['endpoint_filter',
'endpoint_policy',
'federation',
'oauth1',
'revoke',
]
def get_default_domain():
# Return the reference used for the default domain structure during
# sql migrations.
return {
'id': CONF.identity.default_domain_id,
'name': 'Default',
'enabled': True,
'extra': jsonutils.dumps({'description': 'Owns users and tenants '
'(i.e. projects) available '
'on Identity API v2.'})}
# Different RDBMSs use different schemes for naming the Foreign Key
# Constraints. SQLAlchemy does not yet attempt to determine the name
# for the constraint, and instead attempts to deduce it from the column.
# This fails on MySQL.
def get_constraints_names(table, column_name):
fkeys = [fk.name for fk in table.constraints
if (isinstance(fk, sqlalchemy.ForeignKeyConstraint) and
column_name in fk.columns)]
return fkeys
# remove_constraints and add_constraints both accept a list of dictionaries
# that contain:
# {'table': a sqlalchemy table. The constraint is added to dropped from
# this table.
# 'fk_column': the name of a column on the above table, The constraint
# is added to or dropped from this column
# 'ref_column':a sqlalchemy column object. This is the reference column
# for the constraint.
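# A hypothetical example entry (table/column names are illustrative):
# {'table': user_table,
#  'fk_column': 'domain_id',
#  'ref_column': domain_table.c.id}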
def remove_constraints(constraints):
for constraint_def in constraints:
constraint_names = get_constraints_names(constraint_def['table'],
constraint_def['fk_column'])
for constraint_name in constraint_names:
migrate.ForeignKeyConstraint(
columns=[getattr(constraint_def['table'].c,
constraint_def['fk_column'])],
refcolumns=[constraint_def['ref_column']],
name=constraint_name).drop()
def add_constraints(constraints):
for constraint_def in constraints:
if constraint_def['table'].kwargs.get('mysql_engine') == 'MyISAM':
# Don't try to create constraint when using MyISAM because it's
# not supported.
continue
ref_col = constraint_def['ref_column']
ref_engine = ref_col.table.kwargs.get('mysql_engine')
if ref_engine == 'MyISAM':
# Don't try to create constraint when using MyISAM because it's
# not supported.
continue
migrate.ForeignKeyConstraint(
columns=[getattr(constraint_def['table'].c,
constraint_def['fk_column'])],
refcolumns=[constraint_def['ref_column']]).create()
def rename_tables_with_constraints(renames, constraints, engine):
"""Renames tables with foreign key constraints.
Tables are renamed after first removing constraints. The constraints are
replaced after the rename is complete.
This works on databases that don't support renaming tables that have
constraints on them (DB2).
`renames` is a dict, mapping {'to_table_name': from_table, ...}
"""
if engine.name != 'sqlite':
# SQLite doesn't support dropping constraints, so nothing to remove.
remove_constraints(constraints)
for to_table_name in renames:
from_table = renames[to_table_name]
from_table.rename(to_table_name)
if engine.name != 'sqlite':
add_constraints(constraints)
def find_migrate_repo(package=None, repo_name='migrate_repo'):
package = package or sql
path = os.path.abspath(os.path.join(
os.path.dirname(package.__file__), repo_name))
if os.path.isdir(path):
return path
raise exception.MigrationNotProvided(package.__name__, path)
def _sync_common_repo(version):
abs_path = find_migrate_repo()
init_version = migrate_repo.DB_INIT_VERSION
engine = sql.get_engine()
migration.db_sync(engine, abs_path, version=version,
init_version=init_version)
def _fix_federation_tables(engine):
"""Fix the identity_provider, federation_protocol and mapping tables
to be InnoDB and Charset UTF8.
This function is to work around bug #1426334. This has occurred because
the original migration did not specify InnoDB and charset utf8. Due
to the sanity_check, a deployer can get wedged here and require manual
database changes to fix.
"""
# NOTE(marco-fargetta) This is a workaround to "fix" those tables, but
# only when we're running under MySQL
if engine.name == 'mysql':
# * Disable any check for the foreign keys because they prevent the
# ALTER TABLE statements from executing
engine.execute("SET foreign_key_checks = 0")
# * Make the tables using InnoDB engine
engine.execute("ALTER TABLE identity_provider Engine=InnoDB")
engine.execute("ALTER TABLE federation_protocol Engine=InnoDB")
engine.execute("ALTER TABLE mapping Engine=InnoDB")
# * Make the tables using utf8 encoding
engine.execute("ALTER TABLE identity_provider "
"CONVERT TO CHARACTER SET utf8")
engine.execute("ALTER TABLE federation_protocol "
"CONVERT TO CHARACTER SET utf8")
engine.execute("ALTER TABLE mapping CONVERT TO CHARACTER SET utf8")
# * Revert the foreign keys check back
engine.execute("SET foreign_key_checks = 1")
def _sync_extension_repo(extension, version):
init_version = 0
engine = sql.get_engine()
try:
package_name = '.'.join((contrib.__name__, extension))
package = importutils.import_module(package_name)
except ImportError:
raise ImportError(_("%s extension does not exist.")
% package_name)
try:
abs_path = find_migrate_repo(package)
try:
migration.db_version_control(sql.get_engine(), abs_path)
# Register the repo with the version control API
# If it already knows about the repo, it will throw
# an exception that we can safely ignore
except exceptions.DatabaseAlreadyControlledError:
pass
except exception.MigrationNotProvided as e:
print(e)
sys.exit(1)
try:
migration.db_sync(engine, abs_path, version=version,
init_version=init_version)
except ValueError:
# NOTE(marco-fargetta): ValueError is raised from the sanity check (
# verifies that tables are utf8 under mysql). The federation_protocol,
# identity_provider and mapping tables were not initially built with
# InnoDB and utf8 as part of the table arguments when the migration
# was initially created. Bug #1426334 is a scenario where the deployer
# can get wedged, unable to upgrade or downgrade.
# This is a workaround to "fix" those tables if we're under MySQL and
# the version is lower than 6, because the tables were introduced
# earlier and only patched once migration 5 was available.
if engine.name == 'mysql' and \
int(six.text_type(get_db_version(extension))) < 6:
_fix_federation_tables(engine)
# The migration is applied again after the fix
migration.db_sync(engine, abs_path, version=version,
init_version=init_version)
else:
raise
def sync_database_to_version(extension=None, version=None):
if not extension:
_sync_common_repo(version)
# If a specific version was given, it applies to the common
# repository only, and only that is synchronized.
if version is None:
for default_extension in DEFAULT_EXTENSIONS:
_sync_extension_repo(default_extension, version)
else:
_sync_extension_repo(extension, version)
def get_db_version(extension=None):
if not extension:
return migration.db_version(sql.get_engine(), find_migrate_repo(),
migrate_repo.DB_INIT_VERSION)
try:
package_name = '.'.join((contrib.__name__, extension))
package = importutils.import_module(package_name)
except ImportError:
raise ImportError(_("%s extension does not exist.")
% package_name)
return migration.db_version(
sql.get_engine(), find_migrate_repo(package), 0)
def print_db_version(extension=None):
try:
db_version = get_db_version(extension=extension)
print(db_version)
except exception.MigrationNotProvided as e:
print(e)
sys.exit(1)
| |
import unittest
import mock
import numpy
import six
import chainer
from chainer import _backprop_utils
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
def make_array(start, shape, dtype):
size = numpy.product(shape, dtype='i')
a = numpy.arange(start, start + size)
a = a.reshape(shape)
a = a.astype(dtype, copy=False)
return a
class FuncWithBackward(chainer.FunctionNode):
def backward(self, target_input_indexes, grad_outputs):
return self._mock_backward(target_input_indexes, grad_outputs)
class FuncWithBackwardAccumulate(chainer.FunctionNode):
def backward_accumulate(self, target_input_indexes, grad_outputs,
grad_inputs):
"""Computes gradients w.r.t.\\ specified inputs and accumulates them.
This method provides a way to fuse the backward computation and the
gradient accumulations in the case that the multiple functions are
applied to the same variable.
Users have to override either of this method or :meth:`backward`.
It is often simpler to implement :meth:`backward` and is recommended
if you do not need to provide efficient gradient accumulation.
Args:
target_input_indexes (tuple of int): Indices of the input variables
w.r.t. which the gradients are required. It is guaranteed that
this tuple contains at least one element.
grad_outputs (tuple of Variable): Gradients w.r.t. the output
variables. If the gradient w.r.t. an output variable is not
given, the corresponding element is ``None``.
grad_inputs (tuple of Variable): Gradients w.r.t. the input
variables specified by ``target_input_indexes``. These values
are computed by other computation paths. If there is no
gradient value existing for the variable, the corresponding
element is ``None``. See also the note below.
Returns:
Tuple of variables that represent the gradients w.r.t. specified
input variables. Unlike :meth:`backward`, the length of the tuple
**must** be the same as that of ``target_input_indexes``.
.. note::
When the same variable is passed to the multiple input arguments of
a function, only the first position of ``grad_inputs`` corresponding
to these input arguments may contain the gradient variable
corresponding to that input variable, and other entries are set to
``None``. This is an implementation-detail convention to avoid the
complication of correctly accumulating gradients in such a case.
This behavior might be changed in a future version.
"""
assert isinstance(target_input_indexes, tuple)
assert isinstance(grad_outputs, tuple)
assert isinstance(grad_inputs, tuple)
# The default implementation uses backward(). You can override this
# method without using backward().
gxs = self._mock_backward(target_input_indexes, grad_outputs)
len_gxs = len(gxs)
if len_gxs == len(self.inputs):
gxs = tuple([gxs[i] for i in target_input_indexes])
elif len_gxs != len(target_input_indexes):
raise ValueError(
'number of gradients returned by %s (%s) is incorrect.'
% (self._impl_name, self.label))
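# Merge the freshly computed gradients with the pre-accumulated ones:
# if either side is None take the other, otherwise add them together.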
return tuple([gx if g_input is None else
g_input if gx is None else
gx + g_input
for gx, g_input in six.moves.zip(gxs, grad_inputs)])
@testing.parameterize(*testing.product({
'y_shape': [(4,), (0,), (2, 3), ()],
'x_shape': [(3,), (0,), (4, 1), ()],
'override': ['backward', 'backward_accumulate'],
}))
class TestFunctionNode(unittest.TestCase):
def _get_method(self, prefix, gpu):
suffix = 'gpu' if gpu else 'cpu'
return getattr(self.f, prefix + '_' + suffix)
def setUp(self):
y_shape = self.y_shape
x_shape = self.x_shape
y1 = make_array(1, y_shape, numpy.float32)
y2 = make_array(2, y_shape, numpy.float32)
gx1 = chainer.Variable(
make_array(1, x_shape, numpy.float32))
gx2 = None
gy1 = make_array(1, y_shape, numpy.float32)
gy2 = make_array(1, y_shape, numpy.float32)
f = {
'backward': FuncWithBackward,
'backward_accumulate': FuncWithBackwardAccumulate,
}[self.override]()
f._mock_backward = mock.MagicMock(return_value=(gx1, gx2))
f.check_type_forward = mock.MagicMock()
f.forward_cpu = mock.MagicMock(return_value=(y1, y2))
f.forward_gpu = mock.MagicMock()
self.f = f
self.x1 = make_array(0, x_shape, numpy.float32)
self.x2 = make_array(0, x_shape, numpy.int32)
self.y1 = y1
self.y2 = y2
self.gx1 = gx1
self.gx2 = gx2
self.gx1_orig = chainer.Variable(
make_array(3, x_shape, numpy.float32))
self.gx2_orig = chainer.Variable(
make_array(2, x_shape, numpy.float32))
self.gx1_accum = gx1 + self.gx1_orig
self.gy1 = gy1
self.gy2 = gy2
def tearDown(self):
# Set None to delete cuda array
self.f = None
self.y1 = None
self.y2 = None
self.gx1 = None
def setup_gpu(self):
self.x1 = cuda.to_gpu(self.x1)
self.x2 = cuda.to_gpu(self.x2)
self.y1 = cuda.to_gpu(self.y1)
self.y2 = cuda.to_gpu(self.y2)
self.gx1.to_gpu()
self.gx1_orig.to_gpu()
self.gx2_orig.to_gpu()
self.gx1_accum.to_gpu()
self.gy1 = cuda.to_gpu(self.gy1)
self.gy2 = cuda.to_gpu(self.gy2)
self.f.forward_gpu = mock.MagicMock(return_value=(self.y1, self.y2))
self.f._mock_backward = mock.MagicMock(
return_value=(self.gx1, self.gx2))
def check_backprop_step(self, gxs):
flag_none = gxs[0] is None
x1 = chainer.Variable(self.x1)
x2 = chainer.Variable(self.x2)
self.f.inputs = (x1.node, x2.node)
gxrefs = [[gx] if gx is not None else [] for gx in gxs]
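# Each input holds a list of gradient references; backprop_step is
# expected to append/accumulate computed gradients into these lists.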
grad_outputs = (self.gy1, self.gy2)
grad_inputs = dict(zip(self.f.inputs, gxrefs))
_backprop_utils.backprop_step(
self.f, (0, 1), grad_outputs, grad_inputs)
if not chainer.configuration.config.lazy_grad_sum:
# assert eager grad sum
for gxref in gxrefs:
self.assertLessEqual(len(gxref), 1)
gx1 = _backprop_utils._reduce(gxrefs[0])
gx2 = _backprop_utils._reduce(gxrefs[1])
if flag_none:
numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
cuda.to_cpu(self.gx1.data))
self.assertIsNone(gx2)
else:
numpy.testing.assert_array_equal(cuda.to_cpu(gx1.data),
cuda.to_cpu(self.gx1_accum.data))
numpy.testing.assert_array_equal(cuda.to_cpu(gx2.data),
cuda.to_cpu(self.gx2_orig.data))
def test_backprop_step_none_cpu(self):
self.check_backprop_step((None, None))
@attr.gpu
def test_backprop_step_none_gpu(self):
self.setup_gpu()
self.check_backprop_step((None, None))
def test_backprop_step_cpu(self):
self.check_backprop_step((self.gx1_orig, self.gx2_orig))
@attr.gpu
def test_backprop_step_gpu(self):
self.setup_gpu()
self.check_backprop_step((self.gx1_orig, self.gx2_orig))
testing.run_module(__name__, __file__)
| |
#!/usr/bin/env python
"""Unit tests run as PYTHONPATH=../../.. python3 ./test_valve.py."""
# Copyright (C) 2015 Research and Innovation Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2019 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import unittest
from ryu.lib import mac
from ryu.lib.packet import slow
from ryu.ofproto import ether
from ryu.ofproto import ofproto_v1_3 as ofp
from ryu.ofproto import ofproto_v1_3_parser as parser
from faucet import valve_of
from faucet import valve_packet
from valve_test_lib import (
CONFIG, DP1_CONFIG, FAUCET_MAC, GROUP_DP1_CONFIG, IDLE_DP1_CONFIG,
ValveTestBases)
from fakeoftable import CONTROLLER_PORT
class ValveTestCase(ValveTestBases.ValveTestBig):
"""Run complete set of basic tests."""
class ValveFuzzTestCase(ValveTestBases.ValveTestSmall):
"""Test unknown ports/VLANs."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
""" % DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
def test_fuzz_vlan(self):
"""Test unknown VIDs/ports."""
for _ in range(0, 3):
for i in range(0, 64):
self.rcv_packet(1, i, {
'eth_src': self.P1_V100_MAC,
'eth_dst': self.P2_V200_MAC,
'ipv4_src': '10.0.0.2',
'ipv4_dst': '10.0.0.3',
'vid': i})
for i in range(0, 64):
self.rcv_packet(i, 0x100, {
'eth_src': self.P1_V100_MAC,
'eth_dst': self.P2_V200_MAC,
'ipv4_src': '10.0.0.2',
'ipv4_dst': '10.0.0.3',
'vid': 0x100})
# pylint: disable=no-member
# pylint: disable=no-value-for-parameter
cache_info = valve_packet.parse_packet_in_pkt.cache_info()
self.assertGreater(cache_info.hits, cache_info.misses, msg=cache_info)
class ValveCoprocessorTestCase(ValveTestBases.ValveTestSmall):
"""Test direct packet output using coprocessor."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
coprocessor: {strategy: vlan_vid, vlan_vid_base: 100}
p2:
number: 2
native_vlan: 0x100
p3:
number: 3
native_vlan: 0x100
""" % DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
def test_output(self):
copro_vid_out = 102 | ofp.OFPVID_PRESENT
direct_match = {
'in_port': 1, 'vlan_vid': copro_vid_out, 'eth_type': ether.ETH_TYPE_IP,
'eth_src': self.P1_V100_MAC, 'eth_dst': mac.BROADCAST_STR}
self.assertTrue(self.table.is_output(direct_match, port=2))
p2_host_match = {
'eth_src': self.P1_V100_MAC, 'eth_dst': self.P2_V200_MAC,
'ipv4_src': '10.0.0.2', 'ipv4_dst': '10.0.0.3',
'eth_type': ether.ETH_TYPE_IP}
p2_host_receive = copy.deepcopy(p2_host_match)
p2_host_receive.update({'in_port': 2})
# learn P2 host
self.rcv_packet(2, 0x100, p2_host_receive)
# copro can send to P2 via regular pipeline
p2_copro_host_receive = copy.deepcopy(p2_host_match)
p2_copro_host_receive.update(
{'in_port': 1,
'eth_src': p2_host_match['eth_dst'],
'eth_dst': p2_host_match['eth_src']})
p2_copro_host_receive['vlan_vid'] = 0x100 | ofp.OFPVID_PRESENT
self.assertTrue(self.table.is_output(p2_copro_host_receive, port=2, vid=0x100))
# copro send to P2 was not flooded
self.assertFalse(self.table.is_output(p2_copro_host_receive, port=3, vid=0x100))
class ValveRestBcastTestCase(ValveTestBases.ValveTestSmall):
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: 0x100
restricted_bcast_arpnd: true
p2:
number: 2
native_vlan: 0x100
p3:
number: 3
native_vlan: 0x100
restricted_bcast_arpnd: true
""" % DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
def test_rest_bcast(self):
match = {
'in_port': 1, 'vlan_vid': 0, 'eth_type': ether.ETH_TYPE_IP,
'eth_src': self.P1_V100_MAC, 'eth_dst': mac.BROADCAST_STR}
self.assertTrue(self.table.is_output(match, port=2))
self.assertFalse(self.table.is_output(match, port=3))
match = {
'in_port': 2, 'vlan_vid': 0, 'eth_type': ether.ETH_TYPE_IP,
'eth_src': self.P1_V100_MAC, 'eth_dst': mac.BROADCAST_STR}
self.assertTrue(self.table.is_output(match, port=1))
self.assertTrue(self.table.is_output(match, port=3))
class ValveOFErrorTestCase(ValveTestBases.ValveTestSmall):
"""Test decoding of OFErrors."""
def setUp(self):
self.setup_valve(CONFIG)
def test_oferror_parser(self):
"""Test OF error parser works"""
for type_code, error_tuple in valve_of.OFERROR_TYPE_CODE.items():
self.assertTrue(isinstance(type_code, int))
type_str, error_codes = error_tuple
self.assertTrue(isinstance(type_str, str))
for error_code, error_str in error_codes.items():
self.assertTrue(isinstance(error_code, int))
self.assertTrue(isinstance(error_str, str))
test_err = parser.OFPErrorMsg(
datapath=None, type_=ofp.OFPET_FLOW_MOD_FAILED, code=ofp.OFPFMFC_UNKNOWN)
self.valve.oferror(test_err)
test_unknown_type_err = parser.OFPErrorMsg(
datapath=None, type_=666, code=ofp.OFPFMFC_UNKNOWN)
self.valve.oferror(test_unknown_type_err)
test_unknown_code_err = parser.OFPErrorMsg(
datapath=None, type_=ofp.OFPET_FLOW_MOD_FAILED, code=666)
self.valve.oferror(test_unknown_code_err)
class ValveGroupTestCase(ValveTestBases.ValveTestSmall):
"""Tests for datapath with group support."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: v100
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
vlans:
v100:
vid: 0x100
v200:
vid: 0x200
""" % GROUP_DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
def test_unknown_eth_dst_rule(self):
"""Test that packets with unkown eth dst addrs get flooded correctly.
They must be output to each port on the associated vlan, with the
correct vlan tagging. And they must not be forwarded to a port not
on the associated vlan
"""
self.learn_hosts()
matches = [
{
'in_port': 3,
'vlan_vid': self.V100,
},
{
'in_port': 2,
'vlan_vid': 0,
'eth_dst': self.P1_V100_MAC
},
{
'in_port': 1,
'vlan_vid': 0,
'eth_src': self.P1_V100_MAC
},
{
'in_port': 3,
'vlan_vid': self.V200,
'eth_src': self.P2_V200_MAC,
}
]
self.verify_flooding(matches)
class ValveIdleLearnTestCase(ValveTestBases.ValveTestSmall):
"""Smoke test for idle-flow based learning. This feature is not currently reliable."""
CONFIG = """
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: v100
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
p5:
number: 5
output_only: True
mirror: 4
vlans:
v100:
vid: 0x100
v200:
vid: 0x200
""" % IDLE_DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
def test_known_eth_src_rule(self):
"""Test removal flow handlers."""
self.learn_hosts()
self.assertTrue(
self.valve.flow_timeout(
self.mock_time(),
self.valve.dp.tables['eth_dst'].table_id,
{'vlan_vid': self.V100, 'eth_dst': self.P1_V100_MAC}))
self.assertFalse(
self.valve.flow_timeout(
self.mock_time(),
self.valve.dp.tables['eth_src'].table_id,
{'vlan_vid': self.V100, 'in_port': 1, 'eth_src': self.P1_V100_MAC}))
def test_host_learn_coldstart(self):
"""Test flow learning, including cold-start cache invalidation"""
match = {
'in_port': 3, 'vlan_vid': self.V100, 'eth_type': ether.ETH_TYPE_IP,
'eth_src': self.P3_V100_MAC, 'eth_dst': self.P1_V100_MAC}
self.assertTrue(self.table.is_output(match, port=1))
self.assertTrue(self.table.is_output(match, port=2))
self.assertTrue(self.table.is_output(match, port=CONTROLLER_PORT))
self.learn_hosts()
self.assertTrue(self.table.is_output(match, port=1))
self.assertFalse(self.table.is_output(match, port=2))
self.assertFalse(self.table.is_output(match, port=CONTROLLER_PORT))
self.cold_start()
self.assertTrue(self.table.is_output(match, port=1))
self.assertTrue(self.table.is_output(match, port=2))
self.assertTrue(self.table.is_output(match, port=CONTROLLER_PORT))
self.mock_time(self.valve.dp.timeout // 4 * 3)
self.learn_hosts()
self.assertTrue(self.table.is_output(match, port=1))
self.assertFalse(self.table.is_output(match, port=2))
self.assertFalse(self.table.is_output(match, port=CONTROLLER_PORT))
class ValveLACPTestCase(ValveTestBases.ValveTestSmall):
"""Test LACP."""
CONFIG = """
dps:
s1:
%s
lacp_timeout: 5
interfaces:
p1:
number: 1
native_vlan: v100
lacp: 1
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
p5:
number: 5
tagged_vlans: [v300]
vlans:
v100:
vid: 0x100
v200:
vid: 0x200
v300:
vid: 0x300
""" % DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
self.activate_all_ports()
def test_lacp(self):
"""Test LACP comes up."""
test_port = 1
labels = self.port_labels(test_port)
self.assertEqual(
1, int(self.get_prom('port_lacp_state', labels=labels)))
self.assertFalse(
self.valve.dp.ports[1].non_stack_forwarding())
self.rcv_packet(test_port, 0, {
'actor_system': '0e:00:00:00:00:02',
'partner_system': FAUCET_MAC,
'eth_dst': slow.SLOW_PROTOCOL_MULTICAST,
'eth_src': '0e:00:00:00:00:02',
'actor_state_synchronization': 1})
self.assertEqual(
3, int(self.get_prom('port_lacp_state', labels=labels)))
self.assertTrue(
self.valve.dp.ports[1].non_stack_forwarding())
self.learn_hosts()
self.verify_expiry()
def test_lacp_flap(self):
"""Test LACP handles state 0->1->0."""
test_port = 1
labels = self.port_labels(test_port)
self.assertEqual(
1, int(self.get_prom('port_lacp_state', labels=labels)))
self.assertFalse(
self.valve.dp.ports[1].non_stack_forwarding())
self.rcv_packet(test_port, 0, {
'actor_system': '0e:00:00:00:00:02',
'partner_system': FAUCET_MAC,
'eth_dst': slow.SLOW_PROTOCOL_MULTICAST,
'eth_src': '0e:00:00:00:00:02',
'actor_state_synchronization': 1})
self.assertEqual(
3, int(self.get_prom('port_lacp_state', labels=labels)))
self.assertTrue(
self.valve.dp.ports[1].non_stack_forwarding())
self.learn_hosts()
self.verify_expiry()
self.rcv_packet(test_port, 0, {
'actor_system': '0e:00:00:00:00:02',
'partner_system': FAUCET_MAC,
'eth_dst': slow.SLOW_PROTOCOL_MULTICAST,
'eth_src': '0e:00:00:00:00:02',
'actor_state_synchronization': 0})
self.assertEqual(
5, int(self.get_prom('port_lacp_state', labels=labels)))
self.assertFalse(
self.valve.dp.ports[1].non_stack_forwarding())
def test_lacp_timeout(self):
"""Test LACP comes up and then times out."""
test_port = 1
labels = self.port_labels(test_port)
self.assertEqual(
1, int(self.get_prom('port_lacp_state', labels=labels)))
self.assertFalse(
self.valve.dp.ports[1].non_stack_forwarding())
self.rcv_packet(test_port, 0, {
'actor_system': '0e:00:00:00:00:02',
'partner_system': FAUCET_MAC,
'eth_dst': slow.SLOW_PROTOCOL_MULTICAST,
'eth_src': '0e:00:00:00:00:02',
'actor_state_synchronization': 1})
self.assertEqual(
3, int(self.get_prom('port_lacp_state', labels=labels)))
self.assertTrue(
self.valve.dp.ports[1].non_stack_forwarding())
future_now = self.mock_time(10)
expire_ofmsgs = self.valve.state_expire(future_now, None)
self.assertTrue(expire_ofmsgs)
self.assertEqual(
1, int(self.get_prom('port_lacp_state', labels=labels)))
self.assertFalse(
self.valve.dp.ports[1].non_stack_forwarding())
class ValveTFMSizeOverride(ValveTestBases.ValveTestSmall):
"""Test TFM size override."""
CONFIG = """
dps:
s1:
%s
table_sizes:
eth_src: 999
interfaces:
p1:
number: 1
native_vlan: v100
vlans:
v100:
vid: 0x100
""" % DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
def test_size(self):
tfm_by_name = {body.name: body for body in self.table.tfm.values()}
eth_src_table = tfm_by_name.get(b'eth_src', None)
self.assertTrue(eth_src_table)
if eth_src_table is not None:
self.assertEqual(999, eth_src_table.max_entries)
class ValveTFMSize(ValveTestBases.ValveTestSmall):
"""Test TFM sizer."""
NUM_PORTS = 128
CONFIG = """
dps:
s1:
%s
lacp_timeout: 5
interfaces:
p1:
number: 1
native_vlan: v100
lacp: 1
lacp_active: True
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
p5:
number: 5
tagged_vlans: [v300]
interface_ranges:
6-128:
native_vlan: v100
vlans:
v100:
vid: 0x100
v200:
vid: 0x200
v300:
vid: 0x300
""" % DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
def test_size(self):
tfm_by_name = {body.name: body for body in self.table.tfm.values()}
flood_table = tfm_by_name.get(b'flood', None)
self.assertTrue(flood_table)
if flood_table is not None:
self.assertGreater(flood_table.max_entries, self.NUM_PORTS * 2)
class ValveActiveLACPTestCase(ValveTestBases.ValveTestSmall):
"""Test LACP."""
CONFIG = """
dps:
s1:
%s
lacp_timeout: 5
interfaces:
p1:
number: 1
native_vlan: v100
lacp: 1
lacp_active: True
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
p5:
number: 5
tagged_vlans: [v300]
vlans:
v100:
vid: 0x100
v200:
vid: 0x200
v300:
vid: 0x300
""" % DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
self.activate_all_ports()
def test_lacp(self):
"""Test LACP comes up."""
test_port = 1
labels = self.port_labels(test_port)
self.assertEqual(
1, int(self.get_prom('port_lacp_state', labels=labels)))
# Ensure LACP packet sent.
ofmsgs = self.valve.fast_advertise(self.mock_time(), None)[self.valve]
self.assertTrue(self.packet_outs_from_flows(ofmsgs))
self.rcv_packet(test_port, 0, {
'actor_system': '0e:00:00:00:00:02',
'partner_system': FAUCET_MAC,
'eth_dst': slow.SLOW_PROTOCOL_MULTICAST,
'eth_src': '0e:00:00:00:00:02',
'actor_state_synchronization': 1})
self.assertEqual(
3, int(self.get_prom('port_lacp_state', labels=labels)))
self.learn_hosts()
self.verify_expiry()
class ValveL2LearnTestCase(ValveTestBases.ValveTestSmall):
"""Test L2 Learning"""
def setUp(self):
self.setup_valve(CONFIG)
def test_expiry(self):
learn_labels = {
'vid': str(0x200),
'eth_src': self.P2_V200_MAC
}
self.assertEqual(
0, self.get_prom('learned_l2_port', labels=learn_labels))
self.learn_hosts()
self.assertEqual(
2.0, self.get_prom('learned_l2_port', labels=learn_labels))
self.verify_expiry()
self.assertEqual(
0, self.get_prom('learned_l2_port', labels=learn_labels))
class ValveMirrorTestCase(ValveTestBases.ValveTestBig):
"""Test ACL and interface mirroring."""
# TODO: check mirror packets are present/correct
CONFIG = """
acls:
mirror_ospf:
- rule:
nw_dst: '224.0.0.5'
dl_type: 0x800
actions:
mirror: p5
allow: 1
- rule:
dl_type: 0x800
actions:
allow: 0
- rule:
actions:
allow: 1
dps:
s1:
%s
interfaces:
p1:
number: 1
native_vlan: v100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
acls_in: [mirror_ospf]
p2:
number: 2
native_vlan: v200
tagged_vlans: [v100]
p3:
number: 3
tagged_vlans: [v100, v200]
p4:
number: 4
tagged_vlans: [v200]
p5:
number: 5
output_only: True
mirror: 4
vlans:
v100:
vid: 0x100
faucet_vips: ['10.0.0.254/24']
routes:
- route:
ip_dst: 10.99.99.0/24
ip_gw: 10.0.0.1
- route:
ip_dst: 10.99.98.0/24
ip_gw: 10.0.0.99
v200:
vid: 0x200
faucet_vips: ['fc00::1:254/112', 'fe80::1:254/64']
routes:
- route:
ip_dst: 'fc00::10:0/112'
ip_gw: 'fc00::1:1'
- route:
ip_dst: 'fc00::20:0/112'
ip_gw: 'fc00::1:99'
routers:
router1:
bgp:
as: 1
connect_mode: 'passive'
neighbor_as: 2
port: 9179
routerid: '1.1.1.1'
server_addresses: ['127.0.0.1']
neighbor_addresses: ['127.0.0.1']
vlan: v100
""" % DP1_CONFIG
def setUp(self):
self.setup_valve(self.CONFIG)
if __name__ == "__main__":
unittest.main() # pytype: disable=module-attr
| |
#! /usr/bin/env python2
#
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) Michigan State University, 2009-2015. It is licensed under
# the three-clause BSD license; see LICENSE. Contact: ctb@msu.edu
#
# pylint: disable=invalid-name,missing-docstring,no-member
"""
Find all reads connected to the given contigs on a per-partition basis.
% sweep-reads.py -r <range> <contigs fastp> \
        <reads1> <reads2> ... <readsN>
"""
from khmer import utils
EPILOG = """
Output will be a collection of files corresponding to the partitions;
each partition gets a file (prefixed with the output prefix option),
which means this could output many tens or hundreds of thousands of files.
Users should plan accordingly.
This script is very lenient on IO errors, due to the large number of file
operations needed. Thus, errors opening a file for buffer flush or writing
a read to a file will not crash the program; instead, if there were errors,
the user will be warned at the end of execution. Errors opening read files
are also handled -- if a read file fails to open, we simply move on to the
next one.
"""
import screed
import sys
from collections import defaultdict
import os
import time
import khmer
from khmer.khmer_args import (build_hashbits_args, report_on_config, info)
from khmer.kfile import (check_input_files, check_valid_file_exists,
check_space)
from khmer.utils import write_record
DEFAULT_NUM_BUFFERS = 50000
DEFAULT_MAX_READS = 1000000
DEFAULT_BUFFER_SIZE = 10
DEFAULT_OUT_PREF = 'reads'
DEFAULT_RANGE = -1
MIN_HSIZE = 4e7
MIN_KSIZE = 21
def fmt_fasta(name, seq, labels=()):
    # default to an immutable empty tuple to avoid the shared mutable-default pitfall
    return '>{name}\t{labels}\n{seq}\n'.format(
        name=name, labels='\t'.join([str(label) for label in labels]), seq=seq)
def fmt_fastq(name, seq, quality, labels=()):
    return '@{name}\t{labels}\n{seq}\n+\n{acc}\n'.format(
        name=name, labels='\t'.join([str(label) for label in labels]), seq=seq,
        acc=quality)
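# Hedged examples of the formatter output above (read names and labels are
# hypothetical):
#   fmt_fasta('r1', 'ACGT', [7])          -> '>r1\t7\nACGT\n'
#   fmt_fastq('r1', 'ACGT', 'IIII', [7])  -> '@r1\t7\nACGT\n+\nIIII\n'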
class ReadBuffer(object):
def __init__(self):
self.buf = []
def push(self, seq_str):
self.buf.append(seq_str)
def flush(self):
return ''.join(self.buf)
    def is_full(self, full):
        return len(self.buf) >= full
def __len__(self):
return len(self.buf)
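# --- Hedged usage sketch (illustrative only) ---
# Demonstrates the push/is_full/flush cycle of ReadBuffer; the records and the
# threshold of 2 are hypothetical.
def _demo_read_buffer():
    buf = ReadBuffer()
    buf.push(fmt_fasta('r1', 'ACGT', ['p0']))
    buf.push(fmt_fasta('r2', 'TTGG', ['p0']))
    if buf.is_full(2):          # full once the configured threshold is hit
        return buf.flush()      # one string, ready for a single write() call
    return ''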
class ReadBufferManager(object):
def __init__(self, max_buffers, max_reads, max_size, output_pref, outdir,
extension):
self.buffers = {}
self.buffer_counts = {}
self.max_buffers = max_buffers
self.max_reads = max_reads
self.extension = extension
self.output_pref = output_pref
self.outdir = outdir
self.buffer_flush = max_size
self.cur_reads = 0
self.cur_files = 0
self.num_write_errors = 0
self.num_file_errors = 0
        print >>sys.stderr, '''Init new ReadBufferManager [
Max Buffers: {num_bufs}
Max Reads: {max_reads}
Buffer flush: {buf_flush}
]'''.format(num_bufs=self.max_buffers, max_reads=self.max_reads,
buf_flush=self.buffer_flush)
def flush_buffer(self, buf_id):
fn = '{prefix}_{buffer_id}.{ext}'.format(prefix=self.output_pref,
buffer_id=buf_id,
ext=self.extension)
fpath = os.path.join(self.outdir, fn)
buf = self.buffers[buf_id]
try:
outfp = open(fpath, 'a')
except IOError as _:
print >>sys.stderr, '!! ERROR: {_} !!'.format(_=_)
print >>sys.stderr, '*** Failed to open {fn} for \
buffer flush'.format(fn=fpath)
self.num_file_errors += 1
else:
outfp.write(buf.flush())
outfp.close()
finally:
self.cur_reads -= len(buf)
del self.buffers[buf_id]
def queue(self, seq_str, buf_id):
if buf_id in self.buffers:
self.buffers[buf_id].push(seq_str)
if self.buffers[buf_id].is_full(self.buffer_flush):
self.flush_buffer(buf_id)
else:
new_buf = ReadBuffer()
new_buf.push(seq_str)
self.buffers[buf_id] = new_buf
self.cur_reads += 1
if self.cur_reads > self.max_reads:
print >>sys.stderr, '** Reached max num reads...'
self.flush_all()
if len(self.buffers) > self.max_buffers:
# self.clean_buffers(2)
print >>sys.stderr, '** Reached max num buffers...'
self.flush_all()
def flush_all(self):
print >>sys.stderr, '*** Flushing all to files...'
        buf_ids = list(self.buffers.keys())  # copy: flush_buffer mutates the dict
for buf_id in buf_ids:
self.flush_buffer(buf_id)
assert self.cur_reads == 0
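# --- Hedged usage sketch (illustrative; paths and sizes are hypothetical) ---
# Shows the queue/flush lifecycle of ReadBufferManager: reads accumulate in
# per-label buffers and are appended to demo_<label>.fa files on flush.
def _demo_buffer_manager():
    manager = ReadBufferManager(
        max_buffers=2, max_reads=4, max_size=2,
        output_pref='demo', outdir='.', extension='fa')
    for name, label in (('r1', 'p0'), ('r2', 'p0'), ('r3', 'p1')):
        manager.queue(fmt_fasta(name, 'ACGT', [label]), label)
    manager.flush_all()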
def get_parser():
parser = build_hashbits_args('Takes a partitioned reference file \
and a list of reads, and sorts reads \
by which partition they connect to')
parser.epilog = EPILOG
parser.add_argument(
'-r', '--traversal_range', type=int, dest='traversal_range',
default=DEFAULT_RANGE, help='depth of breadth-first search to perform\
from each read')
parser.add_argument('-b', '--buffer_size', dest='max_reads', type=int,
default=DEFAULT_MAX_READS,
help='Max total reads to buffer before flushing')
parser.add_argument('-l', '--buffer_length', dest='buffer_size', type=int,
default=DEFAULT_BUFFER_SIZE,
help='Max length of an individual label buffer \
before flushing')
parser.add_argument('--prefix', dest='output_prefix',
default=DEFAULT_OUT_PREF,
help='Prefix for sorted read files')
parser.add_argument('--outdir', dest='outdir',
help='output directory; default is location of \
fastp file')
parser.add_argument('-m', '--max_buffers', dest='max_buffers', type=int,
default=DEFAULT_NUM_BUFFERS,
help='Max individual label buffers before flushing')
labeling = parser.add_mutually_exclusive_group(required=True)
labeling.add_argument('--label-by-pid', dest='label_by_pid',
action='store_true', help='separate reads by\
reference partition id')
labeling.add_argument('--label-by-seq', dest='label_by_seq',
action='store_true', help='separate reads by\
reference sequence')
labeling.add_argument('--label-by-group', dest='group_size', type=int,
help='separate reads by arbitrary sized groups\
of reference sequences')
parser.add_argument(dest='input_fastp', help='Reference fasta or fastp')
parser.add_argument('input_files', nargs='+',
help='Reads to be swept and sorted')
parser.add_argument('-f', '--force', default=False, action='store_true',
help='Overwrite output file if it exists')
return parser
def main():
info('sweep-reads-buffered.py', ['sweep'])
parser = get_parser()
args = parser.parse_args()
if args.min_tablesize < MIN_HSIZE:
args.min_tablesize = MIN_HSIZE
if args.ksize < MIN_KSIZE:
args.ksize = MIN_KSIZE
report_on_config(args, hashtype='hashbits')
K = args.ksize
HT_SIZE = args.min_tablesize
N_HT = args.n_tables
traversal_range = args.traversal_range
input_fastp = args.input_fastp
if not args.outdir:
outdir = os.path.dirname(input_fastp)
else:
outdir = args.outdir
max_buffers = args.max_buffers
output_pref = args.output_prefix
buf_size = args.buffer_size
max_reads = args.max_reads
check_input_files(args.input_fastp, args.force)
check_valid_file_exists(args.input_files)
all_input_files = [input_fastp]
all_input_files.extend(args.input_files)
# Check disk space availability
check_space(all_input_files, args.force)
# figure out input file type (FA/FQ) -- based on first file
ix = iter(screed.open(args.input_files[0]))
record = ix.next()
del ix
extension = 'fa'
if hasattr(record, 'quality'): # fastq!
extension = 'fq'
output_buffer = ReadBufferManager(
max_buffers, max_reads, buf_size, output_pref, outdir, extension)
# consume the partitioned fasta with which to label the graph
ht = khmer.LabelHash(K, HT_SIZE, N_HT)
try:
print >>sys.stderr, 'consuming input sequences...'
if args.label_by_pid:
print >>sys.stderr, '...labeling by partition id (pid)'
ht.consume_partitioned_fasta_and_tag_with_labels(input_fastp)
elif args.label_by_seq:
print >>sys.stderr, '...labeling by sequence'
for n, record in enumerate(screed.open(input_fastp)):
if n % 50000 == 0:
print >>sys.stderr, \
'...consumed {n} sequences...'.format(n=n)
ht.consume_sequence_and_tag_with_labels(record.sequence, n)
else:
print >>sys.stderr, \
'...labeling to create groups of size {s}'.format(
s=args.group_size)
label = -1
g = 0
try:
outfp = open('{pref}_base_{g}.{ext}'.format(pref=output_pref,
g=g,
ext=extension
), 'wb')
for n, record in enumerate(screed.open(input_fastp)):
if n % args.group_size == 0:
label += 1
if label > g:
g = label
outfp = open('{pref}_base_{g}.{ext}'.format(
pref=output_pref, g=g,
ext=extension), 'wb')
if n % 50000 == 0:
print >>sys.stderr, \
'...consumed {n} sequences...'.format(n=n)
ht.consume_sequence_and_tag_with_labels(record.sequence,
label)
write_record(record, outfp)
except IOError as e:
print >>sys.stderr, '!! ERROR !!', e
print >>sys.stderr, '...error splitting input. exiting...'
except IOError as e:
print >>sys.stderr, '!! ERROR: !!', e
print >>sys.stderr, '...error consuming \
{i}. exiting...'.format(i=input_fastp)
print >>sys.stderr, 'done consuming input sequence. \
added {t} tags and {l} \
labels...'.format(t=ht.n_tags(), l=ht.n_labels())
label_dict = defaultdict(int)
label_number_dist = []
n_orphaned = 0
n_labeled = 0
n_mlabeled = 0
total_t = time.clock()
start_t = time.clock()
for read_file in args.input_files:
print >>sys.stderr, '** sweeping {read_file} for labels...'.format(
read_file=read_file)
file_t = 0.0
try:
read_fp = screed.open(read_file)
except IOError as error:
print >>sys.stderr, '!! ERROR: !!', error
print >>sys.stderr, '*** Could not open {fn}, skipping...'.format(
fn=read_file)
else:
for _, record in enumerate(read_fp):
if _ % 50000 == 0:
end_t = time.clock()
batch_t = end_t - start_t
file_t += batch_t
print >>sys.stderr, '\tswept {n} reads [{nc} labeled, \
{no} orphaned] \
** {sec}s ({sect}s total)' \
.format(n=_, nc=n_labeled,
no=n_orphaned,
sec=batch_t, sect=file_t)
start_t = time.clock()
seq = record.sequence
name = record.name
try:
labels = ht.sweep_label_neighborhood(seq, traversal_range)
                except ValueError:
                    # this read could not be swept (e.g. too short); skip it
                    pass
else:
if hasattr(record, 'quality'):
seq_str = fmt_fastq(name, seq, record.quality, labels)
else:
seq_str = fmt_fasta(name, seq, labels)
label_number_dist.append(len(labels))
if labels:
n_labeled += 1
if len(labels) > 1:
output_buffer.queue(seq_str, 'multi')
n_mlabeled += 1
label_dict['multi'] += 1
else:
output_buffer.queue(seq_str, labels[0])
label_dict[labels[0]] += 1
else:
n_orphaned += 1
output_buffer.queue(seq_str, 'orphaned')
label_dict['orphaned'] += 1
print >>sys.stderr, '** End of file {fn}...'.format(fn=read_file)
output_buffer.flush_all()
read_fp.close()
# gotta output anything left in the buffers at the end!
print >>sys.stderr, '** End of run...'
output_buffer.flush_all()
total_t = time.clock() - total_t
if output_buffer.num_write_errors > 0 or output_buffer.num_file_errors > 0:
print >>sys.stderr, '! WARNING: Sweep finished with errors !'
print >>sys.stderr, '** {writee} reads not written'.format(
writee=output_buffer.num_write_errors)
print >>sys.stderr, '** {filee} errors opening files'.format(
filee=output_buffer.num_file_errors)
print >>sys.stderr, 'swept {n_reads} for labels...'.format(
n_reads=n_labeled + n_orphaned)
print >>sys.stderr, '...with {nc} labeled and {no} orphaned'.format(
nc=n_labeled, no=n_orphaned)
print >>sys.stderr, '...and {nmc} multilabeled'.format(nmc=n_mlabeled)
print >>sys.stderr, '** outputting label number distribution...'
fn = os.path.join(outdir, '{pref}.dist.txt'.format(pref=output_pref))
with open(fn, 'wb') as outfp:
for nc in label_number_dist:
outfp.write('{nc}\n'.format(nc=nc))
fn = os.path.join(outdir, '{pref}.counts.csv'.format(pref=output_pref))
print >>sys.stderr, '** outputting label read counts...'
with open(fn, 'wb') as outfp:
for k in label_dict:
outfp.write('{l},{c}\n'.format(l=k, c=label_dict[k]))
if __name__ == '__main__':
main()
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2014 Measurement Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Determine which input files should be processed for Observatory."""
import logging
import os
import telescope_data_parser
class MetadataWhitelist(object):
"""Whitelist of datasets based on metadata attributes."""
def __init__(self):
self._whitelisted_keys = set()
def __iter__(self):
for key in self._whitelisted_keys:
yield key
def is_whitelisted(self, site_name, isp):
"""Indicates if the given dataset is whitelisted."""
key = self._dataset_key_from_metadata(site_name, isp)
return key in self._whitelisted_keys
def add(self, site_name, isp):
"""Adds a dataset to the whitelist."""
key = self._dataset_key_from_metadata(site_name, isp)
self._whitelisted_keys.add(key)
def _dataset_key_from_metadata(self, site_name, isp):
"""Derives a key for a particular dataset
Derives a whitelist key for a dataset based on metadata attributes.
Args:
site_name: (str) The name of the M-Lab site (e.g. 'lga01').
isp: (str) The name of the client ISP (e.g. 'verizon').
Returns:
(str) Key of the form '[site]_[isp]', for example: 'lga01_verizon'.
"""
return '%s_%s' % (site_name, isp)
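# --- Hedged usage sketch (illustrative; site and ISP names are hypothetical) ---
def _demo_whitelist():
    whitelist = MetadataWhitelist()
    whitelist.add('lga01', 'verizon')
    assert whitelist.is_whitelisted('lga01', 'verizon')
    assert not whitelist.is_whitelisted('lga01', 'comcast')
    return sorted(whitelist)  # e.g. ['lga01_verizon']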
class MetadataWhitelistSerializer(object):
"""Converts a whitelist to/from a file."""
def serialize(self, whitelist, output_file):
"""Serialize a whitelist to a file.
Args:
whitelist: (MetadataWhitelist) Whitelist to serialize.
output_file: (file) File object to which to write the serialized data.
"""
keys_sorted = sorted(whitelist)
output_file.write(os.linesep.join(keys_sorted))
def deserialize(self, whitelist_file):
"""Parses a whitelist from a file.
Args:
whitelist_file: (file) File object from which to parse the whitelist.
Returns:
(MetadataWhitelist) Whitelist parsed from the file.
"""
whitelist = MetadataWhitelist()
for line in whitelist_file:
site_name, isp = line.strip().split('_')
whitelist.add(site_name, isp)
return whitelist
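# --- Hedged round-trip sketch (illustrative only; this module targets
# Python 2, hence StringIO) ---
def _demo_serializer_round_trip():
    import StringIO
    original = MetadataWhitelist()
    original.add('lga01', 'verizon')
    buf = StringIO.StringIO()
    MetadataWhitelistSerializer().serialize(original, buf)
    buf.seek(0)
    restored = MetadataWhitelistSerializer().deserialize(buf)
    return restored.is_whitelisted('lga01', 'verizon')  # True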
class MetadataWhitelistUpdater(object):
"""Updates the whitelist with new datasets that meet sample requirements.
The update process keeps all the datasets that are currently in the whitelist
because Observatory should not unpublish datasets that have previously been
published. Updating checks all other datasets to see if there are new
datasets that now meet sample size thresholds (either because they did not
exist at the last check or their sample count has increased to meet
requirements).
"""
def __init__(self, existing_whitelist, sample_count_checker):
"""Creates a new whitelist updater.
Args:
existing_whitelist: (MetadataWhitelist) Current whitelist before adding
new datasets.
sample_count_checker: (SampleCountChecker) Object to check whether
datasets meet sample count thresholds.
"""
self.whitelist = existing_whitelist
self._logger = logging.getLogger('telescope-convert')
self._sample_count_checker = sample_count_checker
self._checked_datasets = {}
def update(self, filenames):
"""Updates whitelist by checking sample counts of provided datasets.
Args:
filenames: (list) A list of Telescope data files. Any datasets contained
in these files will be added to the whitelist if the dataset meets
sample count requirements.
Returns:
(bool) True if datasets were added to the whitelist.
"""
added_new_datasets = False
# Check sample counts for all non-whitelisted datasets.
for filename in filenames:
self._check_file(filename)
# Go through the analyzed datasets and add any datasets to the whitelist if
# they have sufficient samples.
for key, metadata in self._checked_datasets.iteritems():
if self._sample_count_checker.has_enough_samples(key):
site_name = metadata['site_name']
isp = metadata['isp']
self.whitelist.add(site_name, isp)
self._logger.info('Adding new dataset to whitelist: %s_%s', site_name,
isp)
added_new_datasets = True
return added_new_datasets
def _check_file(self, filename):
"""Analyze a data file to see if it should be whitelisted.
Args:
filename: (str) Filename of Telescope data file to check.
"""
self._logger.info('Checking file for whitelist: %s', filename)
result_reader = telescope_data_parser.SingleTelescopeResultReader(filename)
metadata = result_reader.get_metadata()
# Because we don't use any metric except download throughput, there is no
# need to waste time parsing the data for other metrics.
if metadata['metric_name'] != 'download_throughput':
return
# We don't need to check files that are already whitelisted.
if self.whitelist.is_whitelisted(metadata['site_name'], metadata['isp']):
return
dataset_key = self._dataset_key_from_metadata(metadata)
self._sample_count_checker.add_to_counts(dataset_key, result_reader)
self._checked_datasets[dataset_key] = metadata
def _dataset_key_from_metadata(self, metadata):
"""Derives a key for a particular dataset based on supplied metadata.
Args:
metadata: (dict) A dictionary of metadata describing Telescope results.
Returns:
(str) Key of the form '[site]-[isp]-[metric]', for example:
'lga01-comcast-minimum_rtt'.
"""
dataset_key = '%s-%s-%s' % (metadata['site_name'], metadata['isp'],
metadata['metric_name'])
return dataset_key
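# --- Hedged sketch (illustrative): a stand-in for the real SampleCountChecker,
# which is defined elsewhere; wiring it into the updater shows the update flow
# without touching real Telescope data files. ---
class _AcceptAllSampleCounts(object):
    def add_to_counts(self, dataset_key, result_reader):
        pass
    def has_enough_samples(self, dataset_key):
        return True
# MetadataWhitelistUpdater(MetadataWhitelist(), _AcceptAllSampleCounts())
# would whitelist every download_throughput dataset passed to update().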
class DataFileWhitelistChecker(object):
def __init__(self, whitelist):
"""Checks whether sample counts for given files meet the sample thresholds.
Args:
whitelist: (MetadataWhitelist) Whitelist to use to check files.
"""
self._whitelist = whitelist
def is_whitelisted(self, filename):
"""Indicates whether a file is part of a whitelisted dataset.
Args:
filename: (str) Filename to evaluate.
Returns:
(bool) True if the given filename is whitelisted because it is part of a
dataset that meets the sample size requirements.
"""
result_reader = telescope_data_parser.SingleTelescopeResultReader(filename)
metadata = result_reader.get_metadata()
return self._whitelist.is_whitelisted(metadata['site_name'],
metadata['isp'])
| |
from dataclasses import dataclass
import itertools
from os import PathLike
from typing import Iterable, Iterator, Optional, Union, TypeVar, Dict, List
import logging
import warnings
import torch.distributed as dist
from allennlp.data.instance import Instance
from allennlp.common import util
from allennlp.common.registrable import Registrable
logger = logging.getLogger(__name__)
@dataclass
class WorkerInfo:
"""
Contains information about the worker context when a `DatasetReader`
is being used within a multi-process `DataLoader`.
    From a `DatasetReader` this can be accessed with the [`get_worker_info()`](#get_worker_info) method.
"""
num_workers: int
"""
The total number of workers.
"""
id: int
"""
The 0-indexed ID of the current worker.
"""
@dataclass
class DistributedInfo:
"""
Contains information about the global process rank and total world size when the reader is being
used within distributed training.
From a `DatasetReader` this can be accessed with the [`get_distributed_info()`](#get_distributed_info) method.
"""
world_size: int
"""
The total number of processes in the distributed group.
"""
global_rank: int
"""
The 0-indexed ID of the current process within the distributed group.
This will be between 0 and `world_size - 1`, inclusive.
"""
_T = TypeVar("_T")
PathOrStr = Union[PathLike, str]
DatasetReaderInput = Union[PathOrStr, List[PathOrStr], Dict[str, PathOrStr]]
class DatasetReader(Registrable):
"""
A `DatasetReader` knows how to turn a file containing a dataset into a collection
of `Instance`s. To implement your own, just override the [`_read(file_path)`](#_read) method
to return an `Iterable` of the instances. Ideally this should be a lazy generator
that yields them one at a time.
All parameters necessary to `_read` the data apart from the filepath should be passed
to the constructor of the `DatasetReader`.
You should also implement [`text_to_instance(*inputs)`](#text_to_instance),
which should be used to turn raw data into `Instance`s. This method is required
in order to use a `Predictor` with your reader.
Usually the `_read()` method is implemented to call `text_to_instance()`.
# Parameters
max_instances : `int`, optional (default=`None`)
If given, will stop reading after this many instances. This is a useful setting for debugging.
Setting this disables caching.
manual_distributed_sharding: `bool`, optional (default=`False`)
By default, when used in a distributed setting, `DatasetReader` makes sure that each
trainer process only receives a subset of the data. It does this by reading the whole
dataset in each worker, but filtering out the instances that are not needed.
        While this ensures that each worker will receive unique instances, it's not a very efficient
way to do so since each worker still needs to process every single instance.
A better way to handle this is to manually handle the filtering within your `_read()`
method, in which case you should set `manual_distributed_sharding` to `True` so that
        the base class knows that you are handling the filtering.
See the section below about how to do this.
manual_multiprocess_sharding : `bool`, optional (default=`False`)
This is similar to the `manual_distributed_sharding` parameter, but applies to
multi-process data loading. By default, when this reader is used by a multi-process
data loader (i.e. a `DataLoader` with `num_workers > 1`), each worker will
filter out all but a subset of the instances that are needed so that you
don't end up with duplicates.
However, there is really no benefit to using multiple workers in your `DataLoader`
unless you implement the sharding within your `_read()` method, in which
case you should set `manual_multiprocess_sharding` to `True`, just as with
`manual_distributed_sharding`.
See the section below about how to do this.
serialization_dir: `str`, optional (default=`None`)
        The directory to which the training output is saved, or the directory from which the model is loaded.
!!! Note
This is typically not given an entry in a configuration file. It will be set automatically
            when using the built-in `allennlp` commands.
# Using your reader with multi-process or distributed data loading
There are two things you may need to update in your `DatasetReader` in order for
it to be efficient in the multi-process or distributed data loading context.
1. The `_read()` method should handle filtering out all but the instances that
each particular worker should generate.
This is important because the default mechanism for filtering out `Instance`s in
the distributed or multi-process `DataLoader` setting is not very efficient, since every
worker would still need to process every single `Instance` in your dataset.
But by manually handling the filtering / sharding within your `_read()` method, each
worker only needs to perform a subset of the work required to create instances.
For example, if you were training using 2 GPUs and your `_read()` method reads a file
line-by-line, creating one `Instance` for each line, you could just check the node
rank within `_read()` and then throw away every other line starting at the line number
corresponding to the node rank.
The helper method [`shard_iterable()`](#shard_iterable) is there to make this easy for you.
You can wrap this around any iterable object in your `_read()` method, and it will
return an iterator that skips the right items based on the distributed training
or multi-process loading context. This method can always be called regardless
of whether or not you're actually using distributed training or multi-process loading.
Remember though that when you handle the sharding manually within `_read()`, you need
to let the `DatasetReader` know about this so that it doesn't do any additional
filtering. Therefore you need to ensure that both `self.manual_distributed_sharding` and
`self.manual_multiprocess_sharding` are set to `True`.
If you call the helper method `shard_iterable()` without setting these to `True`,
you'll get an exception.
2. If the instances generated by `_read()` contain `TextField`s, those `TextField`s
should not have any token indexers assigned. The token indexers need to be applied
in the [`apply_token_indexers()`](#apply_token_indexers) method instead.
This is highly recommended because if the instances generated by your `_read()` method
have token indexers attached, those indexers will be duplicated when they are sent across
processes. If your token indexers contain large objects (such as `PretrainedTransformerTokenIndexer`s)
this could take up a massive amount of memory.
"""
def __init__(
self,
max_instances: Optional[int] = None,
manual_distributed_sharding: bool = False,
manual_multiprocess_sharding: bool = False,
serialization_dir: Optional[str] = None,
) -> None:
# Do some validation.
if max_instances is not None and max_instances < 0:
raise ValueError("If specified, max_instances should be a positive int")
self.max_instances = max_instances
self.manual_distributed_sharding = manual_distributed_sharding
self.manual_multiprocess_sharding = manual_multiprocess_sharding
self.serialization_dir = serialization_dir
self._worker_info: Optional[WorkerInfo] = None
self._distributed_info: Optional[DistributedInfo] = None
# If we're actually in the main process, we can find the info using torch utils.
if util.is_distributed():
self._distributed_info = DistributedInfo(dist.get_world_size(), dist.get_rank())
def read(self, file_path: DatasetReaderInput) -> Iterator[Instance]:
"""
Returns an iterator of instances that can be read from the file path.
"""
for instance in self._multi_worker_islice(self._read(file_path)): # type: ignore
if self._worker_info is None:
# If not running in a subprocess, it's safe to apply the token_indexers right away.
self.apply_token_indexers(instance)
yield instance
def _read(self, file_path) -> Iterable[Instance]:
"""
Reads the instances from the given `file_path` and returns them as an
`Iterable`.
You are strongly encouraged to use a generator so that users can
read a dataset in a lazy way, if they so choose.
"""
# NOTE: `file_path` is left untyped here on purpose.
# Technically the type should be `DatasetReaderInput`, but many subclass
# implementations of `DatasetReader` define their `_read()` method to take a more
# specific type, such as just `str`. But that would be a type error
# according to mypy: https://mypy.readthedocs.io/en/stable/common_issues.html#incompatible-overrides
raise NotImplementedError
def text_to_instance(self, *inputs) -> Instance:
"""
Does whatever tokenization or processing is necessary to go from textual input to an
`Instance`. The primary intended use for this is with a
:class:`~allennlp.predictors.predictor.Predictor`, which gets text input as a JSON
object and needs to process it to be input to a model.
The intent here is to share code between :func:`_read` and what happens at
model serving time, or any other time you want to make a prediction from new data. We need
to process the data in the same way it was done at training time. Allowing the
`DatasetReader` to process new text lets us accomplish this, as we can just call
`DatasetReader.text_to_instance` when serving predictions.
The input type here is rather vaguely specified, unfortunately. The `Predictor` will
have to make some assumptions about the kind of `DatasetReader` that it's using, in order
to pass it the right information.
"""
raise NotImplementedError
def apply_token_indexers(self, instance: Instance) -> None:
"""
If `Instance`s created by this reader contain `TextField`s without `token_indexers`,
this method can be overriden to set the `token_indexers` of those fields.
        E.g. if you have a `"source"` `TextField`, you could implement this method like this:
```python
def apply_token_indexers(self, instance: Instance) -> None:
instance["source"].token_indexers = self._token_indexers
```
If your `TextField`s are wrapped in a `ListField`, you can access them via `field_list`.
E.g. if you had a `"source"` field of `ListField[TextField]` objects, you could:
```python
for text_field in instance["source"].field_list:
text_field.token_indexers = self._token_indexers
```
"""
pass
def get_worker_info(self) -> Optional[WorkerInfo]:
"""
Provides a [`WorkerInfo`](#WorkerInfo) object when the reader is being used within a
worker of a multi-process `DataLoader`.
If the reader is in the main process, this is just `None`.
!!! NOTE
This is different than distributed training. If the `DatasetReader`
is being used within distributed training, `get_worker_info()` will only
provide information on the `DataLoader` worker within its node.
Use [`get_distributed_info`](#get_distributed_info) to get information on distributed
training context.
"""
return self._worker_info
def get_distributed_info(self) -> Optional[DistributedInfo]:
"""
Provides a [`DistributedInfo`](#DistributedInfo) object when the reader is being
used within distributed training.
If not in distributed training, this is just `None`.
"""
return self._distributed_info
def _set_worker_info(self, info: Optional[WorkerInfo]) -> None:
"""
Should only be used internally.
"""
self._worker_info = info
def _set_distributed_info(self, info: Optional[DistributedInfo]) -> None:
"""
Should only be used internally.
"""
self._distributed_info = info
def shard_iterable(self, iterable: Iterable[_T]) -> Iterator[_T]:
"""
Helper method that determines which items in an iterable object to skip based
on the current node rank (for distributed training) and worker ID (for multi-process data loading).
"""
if not self.manual_distributed_sharding or not self.manual_multiprocess_sharding:
raise ValueError(
"self.shard_iterable() was called but self.manual_distributed_sharding and "
"self.manual_multiprocess_sharding was not set to True. Did you forget to call "
"super().__init__(manual_distributed_sharding=True, manual_multiprocess_sharding=True) "
"in your constructor?"
)
sharded_slice: Iterator[_T] = iter(iterable)
if util.is_distributed():
sharded_slice = itertools.islice(
sharded_slice, dist.get_rank(), None, dist.get_world_size()
)
if self._worker_info is not None:
sharded_slice = itertools.islice(
sharded_slice, self._worker_info.id, None, self._worker_info.num_workers
)
# We don't know for sure how many instances we have to produce.
# _multi_worker_islice() figures that out. But we know for sure
# it won't be more than max_instances.
if self.max_instances is not None:
sharded_slice = itertools.islice(sharded_slice, self.max_instances)
return sharded_slice
def _multi_worker_islice(
self,
iterable: Iterable[_T],
) -> Iterator[_T]:
"""
This is just like `shard_iterable` but is for internal use only.
It has some additional logic to handle `max_instances` based on the distributed
or multi-process context, and whether or not sharding is handled manually
in the `_read()` method.
"""
# This has some complicated logic because any given reader may or may not
# implement manual multi-process and manual distributed sharding itself.
# We have to handle all possibilities.
sharded_slice: Iterator[_T] = iter(iterable)
# We'll adjust max_instances as we go, depending on what sort of sharding is done.
# At the end, we want to ensure the total number of instances collected across
# all workers processes is equal to self.max_instances.
max_instances = self.max_instances
if self._distributed_info is not None:
if max_instances is not None:
# Need to scale down max_instances because otherwise each node would read self.max_instances,
# but we really want self.max_instances total across all nodes.
if self._distributed_info.global_rank < (
max_instances % self._distributed_info.world_size
):
max_instances = max_instances // self._distributed_info.world_size + 1
else:
max_instances = max_instances // self._distributed_info.world_size
if not self.manual_distributed_sharding:
sharded_slice = itertools.islice(
sharded_slice,
self._distributed_info.global_rank,
None,
self._distributed_info.world_size,
)
if self._worker_info is not None:
if max_instances is not None:
# Like in the distributed case above, we need to adjust max_instances.
if self._worker_info.id < (max_instances % self._worker_info.num_workers):
max_instances = max_instances // self._worker_info.num_workers + 1
else:
max_instances = max_instances // self._worker_info.num_workers
if not self.manual_multiprocess_sharding:
warnings.warn(
"Using multi-process data loading without setting "
"DatasetReader.manual_multiprocess_sharding to True.\n"
"Did you forget to set this?\n"
"If you're not handling the multi-process sharding logic within your "
"_read() method, there is probably no benefit to using more than one "
"worker.",
UserWarning,
)
sharded_slice = itertools.islice(
sharded_slice, self._worker_info.id, None, self._worker_info.num_workers
)
if max_instances is not None:
sharded_slice = itertools.islice(sharded_slice, max_instances)
return sharded_slice
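# --- Hedged sketch (illustrative; `_SketchLineReader` is hypothetical and not
# part of this module) ---
# A minimal reader that opts into manual sharding: shard_iterable() skips the
# lines owned by other distributed ranks / DataLoader workers, so each worker
# only materializes its own share of instances.
class _SketchLineReader(DatasetReader):
    def __init__(self, **kwargs) -> None:
        super().__init__(
            manual_distributed_sharding=True,
            manual_multiprocess_sharding=True,
            **kwargs,
        )
    def _read(self, file_path) -> Iterable[Instance]:
        with open(file_path) as data_file:
            for line in self.shard_iterable(data_file):
                # A real reader would build fields from `line` and attach
                # token indexers later via apply_token_indexers().
                yield self.text_to_instance(line.strip())
    def text_to_instance(self, text: str) -> Instance:  # type: ignore
        return Instance({})  # fields omitted in this sketch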
| |
# -*- coding: utf-8 -*-
import httplib as http
import logging
from bs4 import BeautifulSoup
from flask import request
from framework.mongo.utils import to_mongo_key
from framework.exceptions import HTTPError
from framework.auth.utils import privacy_info_handle
from framework.auth.decorators import must_be_logged_in
from framework.flask import redirect
from website.addons.wiki import settings
from website.addons.wiki import utils as wiki_utils
from website.profile.utils import get_gravatar
from website.project.views.node import _view_project
from website.project.model import has_anonymous_link
from website.project.decorators import (
must_be_contributor_or_public,
must_have_addon, must_not_be_registration,
must_be_valid_project,
must_have_permission,
must_have_write_permission_or_public_wiki,
)
from website.exceptions import NodeStateError
from .exceptions import (
NameEmptyError,
NameInvalidError,
NameMaximumLengthError,
PageCannotRenameError,
PageConflictError,
PageNotFoundError,
InvalidVersionError,
)
from .model import NodeWikiPage
logger = logging.getLogger(__name__)
WIKI_NAME_EMPTY_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be empty.'
))
WIKI_NAME_MAXIMUM_LENGTH_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page name cannot be more than 100 characters.'
))
WIKI_PAGE_CANNOT_RENAME_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The wiki page cannot be renamed.'
))
WIKI_PAGE_CONFLICT_ERROR = HTTPError(http.CONFLICT, data=dict(
message_short='Page conflict',
message_long='A wiki page with that name already exists.'
))
WIKI_PAGE_NOT_FOUND_ERROR = HTTPError(http.NOT_FOUND, data=dict(
message_short='Not found',
message_long='A wiki page could not be found.'
))
WIKI_INVALID_VERSION_ERROR = HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='The requested version of this wiki page does not exist.'
))
def _get_wiki_versions(node, name, anonymous=False):
key = to_mongo_key(name)
# Skip if wiki_page doesn't exist; happens on new projects before
# default "home" page is created
if key not in node.wiki_pages_versions:
return []
versions = [
NodeWikiPage.load(version_wiki_id)
for version_wiki_id in node.wiki_pages_versions[key]
]
return [
{
'version': version.version,
'user_fullname': privacy_info_handle(version.user.fullname, anonymous, name=True),
'date': '{} UTC'.format(version.date.replace(microsecond=0).isoformat().replace('T', ' ')),
}
for version in reversed(versions)
]
def _get_wiki_pages_current(node):
return [
{
'name': sorted_page.page_name,
'url': node.web_url_for('project_wiki_view', wname=sorted_page.page_name, _guid=True),
'wiki_id': sorted_page._primary_key,
'wiki_content': wiki_page_content(sorted_page.page_name, node=node)
}
for sorted_page in [
node.get_wiki_page(sorted_key)
for sorted_key in sorted(node.wiki_pages_current)
]
# TODO: remove after forward slash migration
if sorted_page is not None
]
def _get_wiki_api_urls(node, name, additional_urls=None):
urls = {
'base': node.api_url_for('project_wiki_home'),
'delete': node.api_url_for('project_wiki_delete', wname=name),
'rename': node.api_url_for('project_wiki_rename', wname=name),
'content': node.api_url_for('wiki_page_content', wname=name),
'settings': node.api_url_for('edit_wiki_settings'),
'grid': node.api_url_for('project_wiki_grid_data', wname=name)
}
if additional_urls:
urls.update(additional_urls)
return urls
def _get_wiki_web_urls(node, key, version=1, additional_urls=None):
urls = {
'base': node.web_url_for('project_wiki_home', _guid=True),
'edit': node.web_url_for('project_wiki_view', wname=key, _guid=True),
'home': node.web_url_for('project_wiki_home', _guid=True),
'page': node.web_url_for('project_wiki_view', wname=key, _guid=True),
}
if additional_urls:
urls.update(additional_urls)
return urls
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_widget(**kwargs):
node = kwargs['node'] or kwargs['project']
wiki = node.get_addon('wiki')
wiki_page = node.get_wiki_page('home')
more = False
use_python_render = False
if wiki_page and wiki_page.html(node):
wiki_html = wiki_page.html(node)
if len(wiki_html) > 500:
wiki_html = BeautifulSoup(wiki_html[:500] + '...', 'html.parser')
more = True
else:
            wiki_html = BeautifulSoup(wiki_html, 'html.parser')
more = False
use_python_render = wiki_page.rendered_before_update
else:
wiki_html = None
ret = {
'complete': True,
'wiki_content': unicode(wiki_html) if wiki_html else None,
'wiki_content_url': node.api_url_for('wiki_page_content', wname='home'),
'use_python_render': use_python_render,
'more': more,
'include': False,
}
ret.update(wiki.config.to_json())
return ret
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_have_addon('wiki', 'node')
def wiki_page_draft(wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(wname)
return {
'wiki_content': wiki_page.content if wiki_page else None,
'wiki_draft': (wiki_page.get_draft(node) if wiki_page
else wiki_utils.get_sharejs_content(node, wname)),
}
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def wiki_page_content(wname, wver=None, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(wname, version=wver)
use_python_render = wiki_page.rendered_before_update if wiki_page else False
return {
'wiki_content': wiki_page.content if wiki_page else '',
# Only return rendered version if page was saved before wiki change
'wiki_rendered': wiki_page.html(node) if use_python_render else '',
}
@must_be_valid_project # injects project
@must_have_permission('write') # injects user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_delete(auth, wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
wiki_page = node.get_wiki_page(wiki_name)
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
if not wiki_page:
raise HTTPError(http.NOT_FOUND)
node.delete_node_wiki(wiki_name, auth)
wiki_utils.broadcast_to_sharejs('delete', sharejs_uuid, node)
return {}
@must_be_valid_project # returns project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_view(auth, wname, path=None, **kwargs):
node = kwargs['node'] or kwargs['project']
anonymous = has_anonymous_link(node, auth)
wiki_name = (wname or '').strip()
wiki_key = to_mongo_key(wiki_name)
wiki_page = node.get_wiki_page(wiki_name)
wiki_settings = node.get_addon('wiki')
can_edit = (
auth.logged_in
and not node.is_registration
and (
node.has_permission(auth.user, 'write')
or wiki_settings.is_publicly_editable
)
)
versions = _get_wiki_versions(node, wiki_name, anonymous=anonymous)
# Determine panels used in view
panels = {'view', 'edit', 'compare', 'menu'}
if request.args and set(request.args).intersection(panels):
panels_used = [panel for panel in request.args if panel in panels]
num_columns = len(set(panels_used).intersection({'view', 'edit', 'compare'}))
if num_columns == 0:
panels_used.append('view')
num_columns = 1
else:
panels_used = ['view', 'menu']
num_columns = 1
try:
view = wiki_utils.format_wiki_version(
version=request.args.get('view'),
num_versions=len(versions),
allow_preview=True,
)
compare = wiki_utils.format_wiki_version(
version=request.args.get('compare'),
num_versions=len(versions),
allow_preview=False,
)
except InvalidVersionError:
raise WIKI_INVALID_VERSION_ERROR
# Default versions for view and compare
version_settings = {
'view': view or ('preview' if 'edit' in panels_used else 'current'),
'compare': compare or 'previous',
}
# ensure home is always lower case since it cannot be renamed
if wiki_name.lower() == 'home':
wiki_name = 'home'
if wiki_page:
version = wiki_page.version
is_current = wiki_page.is_current
content = wiki_page.html(node)
use_python_render = wiki_page.rendered_before_update
else:
version = 'NA'
is_current = False
content = ''
use_python_render = False
if can_edit:
if wiki_key not in node.wiki_private_uuids:
wiki_utils.generate_private_uuid(node, wiki_name)
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, wiki_name)
else:
if wiki_key not in node.wiki_pages_current and wiki_key != 'home':
raise WIKI_PAGE_NOT_FOUND_ERROR
if 'edit' in request.args:
if wiki_settings.is_publicly_editable:
raise HTTPError(http.UNAUTHORIZED)
raise HTTPError(http.FORBIDDEN)
sharejs_uuid = None
ret = {
'wiki_id': wiki_page._primary_key if wiki_page else None,
'wiki_name': wiki_page.page_name if wiki_page else wiki_name,
'wiki_content': content,
'use_python_render': use_python_render,
'page': wiki_page,
'version': version,
'versions': versions,
'sharejs_uuid': sharejs_uuid or '',
'sharejs_url': settings.SHAREJS_URL,
'is_current': is_current,
'version_settings': version_settings,
'pages_current': _get_wiki_pages_current(node),
'category': node.category,
'panels_used': panels_used,
'num_columns': num_columns,
'urls': {
'api': _get_wiki_api_urls(node, wiki_name, {
'content': node.api_url_for('wiki_page_content', wname=wiki_name),
'draft': node.api_url_for('wiki_page_draft', wname=wiki_name),
}),
'web': _get_wiki_web_urls(node, wiki_name),
'gravatar': get_gravatar(auth.user, 25),
},
}
ret.update(_view_project(node, auth, primary=True))
ret['user']['can_edit_wiki_body'] = can_edit
return ret
@must_be_valid_project # injects node or project
@must_have_write_permission_or_public_wiki # injects user
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit_post(auth, wname, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
wiki_page = node.get_wiki_page(wiki_name)
redirect_url = node.web_url_for('project_wiki_view', wname=wiki_name, _guid=True)
form_wiki_content = request.form['content']
# ensure home is always lower case since it cannot be renamed
if wiki_name.lower() == 'home':
wiki_name = 'home'
if wiki_page:
# Only update node wiki if content has changed
if form_wiki_content != wiki_page.content:
node.update_node_wiki(wiki_page.page_name, form_wiki_content, auth)
ret = {'status': 'success'}
else:
ret = {'status': 'unmodified'}
else:
        # update_node_wiki will create a new wiki page because a page
        # with this name does not exist yet
node.update_node_wiki(wiki_name, form_wiki_content, auth)
ret = {'status': 'success'}
return ret, http.FOUND, None, redirect_url
@must_be_valid_project # injects node or project
@must_have_permission('admin')
@must_not_be_registration
@must_have_addon('wiki', 'node')
def edit_wiki_settings(node, auth, **kwargs):
wiki_settings = node.get_addon('wiki')
permissions = request.get_json().get('permission', None)
if not wiki_settings:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='Cannot change wiki settings without a wiki'
))
if permissions == 'public':
permissions = True
elif permissions == 'private':
permissions = False
else:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid request',
message_long='Permissions flag used is incorrect.'
))
try:
wiki_settings.set_editing(permissions, auth, log=True)
except NodeStateError as e:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short="Can't change privacy",
message_long=e.message
))
return {
'status': 'success',
'permissions': permissions,
}
@must_be_logged_in
@must_be_valid_project
def get_node_wiki_permissions(node, auth, **kwargs):
return wiki_utils.serialize_wiki_settings(auth.user, [node._id])
@must_be_valid_project
@must_have_addon('wiki', 'node')
def project_wiki_home(**kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname='home', _guid=True))
@must_be_valid_project # injects project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_id_page(auth, wid, **kwargs):
node = kwargs['node'] or kwargs['project']
wiki_page = node.get_wiki_page(id=wid)
if wiki_page:
return redirect(node.web_url_for('project_wiki_view', wname=wiki_page.page_name, _guid=True))
else:
raise WIKI_PAGE_NOT_FOUND_ERROR
@must_be_valid_project
@must_have_write_permission_or_public_wiki
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_edit(wname, **kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname=wname, _guid=True) + '?edit&view&menu')
@must_be_valid_project
@must_be_contributor_or_public
@must_have_addon('wiki', 'node')
def project_wiki_compare(wname, wver, **kwargs):
node = kwargs['node'] or kwargs['project']
return redirect(node.web_url_for('project_wiki_view', wname=wname, _guid=True) + '?view&compare={0}&menu'.format(wver))
@must_not_be_registration
@must_have_permission('write')
@must_have_addon('wiki', 'node')
def project_wiki_rename(auth, wname, **kwargs):
"""View that handles user the X-editable input for wiki page renaming.
:param wname: The target wiki page name.
:param-json value: The new wiki page name.
"""
node = kwargs['node'] or kwargs['project']
wiki_name = wname.strip()
new_wiki_name = request.get_json().get('value', None)
try:
node.rename_node_wiki(wiki_name, new_wiki_name, auth)
except NameEmptyError:
raise WIKI_NAME_EMPTY_ERROR
except NameInvalidError as error:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_short='Invalid name',
message_long=error.args[0]
))
except NameMaximumLengthError:
raise WIKI_NAME_MAXIMUM_LENGTH_ERROR
except PageCannotRenameError:
raise WIKI_PAGE_CANNOT_RENAME_ERROR
except PageConflictError:
raise WIKI_PAGE_CONFLICT_ERROR
except PageNotFoundError:
raise WIKI_PAGE_NOT_FOUND_ERROR
else:
sharejs_uuid = wiki_utils.get_sharejs_uuid(node, new_wiki_name)
wiki_utils.broadcast_to_sharejs('redirect', sharejs_uuid, node, new_wiki_name)
@must_be_valid_project # returns project
@must_have_permission('write') # returns user, project
@must_not_be_registration
@must_have_addon('wiki', 'node')
def project_wiki_validate_name(wname, auth, node, **kwargs):
wiki_name = wname.strip()
wiki_key = to_mongo_key(wiki_name)
if wiki_key in node.wiki_pages_current or wiki_key == 'home':
raise HTTPError(http.CONFLICT, data=dict(
message_short='Wiki page name conflict.',
message_long='A wiki page with that name already exists.'
))
else:
node.update_node_wiki(wiki_name, '', auth)
return {'message': wiki_name}
@must_be_valid_project
@must_be_contributor_or_public
def project_wiki_grid_data(auth, node, **kwargs):
pages = []
project_wiki_pages = {
'title': 'Project Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': format_project_wiki_pages(node, auth)
}
pages.append(project_wiki_pages)
component_wiki_pages = {
'title': 'Component Wiki Pages',
'kind': 'folder',
'type': 'heading',
'children': format_component_wiki_pages(node, auth)
}
if len(component_wiki_pages['children']) > 0:
pages.append(component_wiki_pages)
return pages
def format_home_wiki_page(node):
home_wiki = node.get_wiki_page('home')
home_wiki_page = {
'page': {
'url': node.web_url_for('project_wiki_home'),
'name': 'Home',
'id': 'None',
}
}
if home_wiki:
home_wiki_page = {
'page': {
'url': node.web_url_for('project_wiki_view', wname='home', _guid=True),
'name': 'Home',
'id': home_wiki._primary_key,
}
}
return home_wiki_page
def format_project_wiki_pages(node, auth):
pages = []
can_edit = node.has_permission(auth.user, 'write') and not node.is_registration
project_wiki_pages = _get_wiki_pages_current(node)
home_wiki_page = format_home_wiki_page(node)
pages.append(home_wiki_page)
for wiki_page in project_wiki_pages:
if wiki_page['name'] != 'home':
has_content = bool(wiki_page['wiki_content'].get('wiki_content'))
page = {
'page': {
'url': wiki_page['url'],
'name': wiki_page['name'],
'id': wiki_page['wiki_id'],
}
}
if can_edit or has_content:
pages.append(page)
return pages
def format_component_wiki_pages(node, auth):
pages = []
for node in node.nodes:
if any([node.is_deleted,
not node.can_view(auth),
not node.has_addon('wiki')]):
continue
else:
serialized = serialize_component_wiki(node, auth)
if serialized:
pages.append(serialized)
return pages
def serialize_component_wiki(node, auth):
children = []
url = node.web_url_for('project_wiki_view', wname='home', _guid=True)
home_has_content = bool(wiki_page_content('home', node=node).get('wiki_content'))
component_home_wiki = {
'page': {
'url': url,
'name': 'Home',
# Handle pointers
'id': node._primary_key if node.primary else node.node._primary_key,
}
}
can_edit = node.has_permission(auth.user, 'write') and not node.is_registration
if can_edit or home_has_content:
children.append(component_home_wiki)
for page in _get_wiki_pages_current(node):
if page['name'] != 'home':
has_content = bool(page['wiki_content'].get('wiki_content'))
component_page = {
'page': {
'url': page['url'],
'name': page['name'],
'id': page['wiki_id'],
}
}
if can_edit or has_content:
children.append(component_page)
if len(children) > 0:
component = {
'page': {
'name': node.title,
'url': url,
},
'kind': 'component',
'category': node.category,
'pointer': not node.primary,
'children': children,
}
return component
return None
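# --- Hedged sketch of the grid payload assembled by the builders above
# (field values are hypothetical; real URLs and ids come from the node) ---
_EXAMPLE_WIKI_GRID = [
    {
        'title': 'Project Wiki Pages',
        'kind': 'folder',
        'type': 'heading',
        'children': [
            {'page': {'url': '/abc12/wiki/home/', 'name': 'Home', 'id': 'None'}},
        ],
    },
]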
| |
import atexit
import logging
import re
import threading
from typing import Set
from uqbar.objects import new
import supriya.exceptions
from supriya.commands import (
FailResponse,
GroupQueryTreeRequest,
NotifyRequest,
QueryTreeGroup,
QueryTreeSynth,
QuitRequest,
SyncRequest,
)
from supriya.enums import NodeAction
from supriya.realtime import BlockAllocator, BootOptions, NodeIdAllocator
from supriya.system import PubSub
# TODO: Implement connect() and disconnect()
# TODO: Handle clientID return via [/done /notify 0 64] for allocators
logger = logging.getLogger("supriya.server")
class Server:
"""
An scsynth server proxy.
::
>>> import supriya.realtime
>>> server = supriya.realtime.Server.default()
>>> server.boot()
<Server: udp://127.0.0.1:57751, 8i8o>
::
>>> server.quit()
<Server: offline>
"""
### CLASS VARIABLES ###
__documentation_section__ = "Main Classes"
_default_server = None
_servers: Set["Server"] = set()
### CONSTRUCTOR ###
"""
def __new__(cls, ip_address="127.0.0.1", port=57751, **kwargs):
key = (ip_address, port)
if key not in cls._servers:
instance = object.__new__(cls)
instance.__init__(ip_address=ip_address, port=port, **kwargs)
cls._servers[key] = instance
return cls._servers[key]
"""
### INITIALIZER ###
def __init__(self, ip_address="127.0.0.1", port=57751):
import supriya.osc
import supriya.realtime
type(self)._servers.add(self)
### NET ADDRESS ###
self._ip_address = ip_address
self._port = port
### OSC MESSAGING ###
self._latency = 0.1
self._lock = threading.Lock()
self._osc_io = supriya.osc.OscIO()
### ALLOCATORS ###
self._audio_bus_allocator = None
self._buffer_allocator = None
self._control_bus_allocator = None
self._node_id_allocator = None
self._sync_id = 0
### SERVER PROCESS ###
self._client_id = None
self._maximum_logins = None
self._is_owner = False
self._is_running = False
self._options = BootOptions()
self._server_process = None
self._status = None
self._status_watcher = None
### PROXIES ###
self._audio_input_bus_group = None
self._audio_output_bus_group = None
self._default_group = None
self._root_node = None
self._meters = supriya.realtime.ServerMeters(self)
self._recorder = supriya.realtime.ServerRecorder(self)
### PROXY MAPPINGS ###
self._audio_buses = {}
self._buffer_proxies = {}
self._buffers = {}
self._control_bus_proxies = {}
self._control_buses = {}
self._nodes = {}
self._synthdefs = {}
### DEBUG ###
self.debug_request_names = False
### REGISTER WITH ATEXIT ###
atexit.register(self._shutdown)
### SPECIAL METHODS ###
def __contains__(self, expr):
import supriya.realtime
import supriya.synthdefs
if isinstance(expr, supriya.realtime.Node):
if expr.server is not self:
return False
node_id = expr.node_id
if node_id in self._nodes and self._nodes[node_id] is expr:
return True
elif isinstance(expr, supriya.synthdefs.SynthDef):
name = expr.actual_name
if name in self._synthdefs and self._synthdefs[name] == expr:
return True
elif isinstance(expr, supriya.realtime.ServerObject):
return expr.server is self
return False
def __enter__(self):
self.boot()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.sync()
self.quit()
def __getitem__(self, item):
"""
Get ``item`` from server.
::
>>> server = supriya.Server.default().boot()
>>> supriya.Synth(name='foo').allocate()
<+ Synth: 1000 (foo)>
::
>>> server[1000]
<+ Synth: 1000 (foo)>
::
>>> server['foo']
<+ Synth: 1000 (foo)>
::
>>> server['b10']
<+ Buffer: 10>
::
>>> server['a0']
<+ Bus: 0 (audio)>
::
>>> server['c16']
<+ Bus: 16 (control)>
::
>>> server = server.quit()
>>> server['c16']
Traceback (most recent call last):
...
supriya.exceptions.ServerOffline
"""
import supriya
if not self.is_running:
raise supriya.exceptions.ServerOffline
if isinstance(item, str):
match = re.match(r"b(?P<id>\d+)", item)
if match:
id_ = int(match.groupdict()["id"])
return supriya.realtime.Buffer(id_).allocate(server=self)
match = re.match(r"c(?P<id>\d+)", item)
if match:
id_ = int(match.groupdict()["id"])
return supriya.realtime.Bus(id_, "control").allocate(server=self)
match = re.match(r"a(?P<id>\d+)", item)
if match:
id_ = int(match.groupdict()["id"])
return supriya.realtime.Bus(id_, "audio").allocate(server=self)
result = self.root_node[item]
elif isinstance(item, int):
result = self._nodes.get(item)
else:
raise ValueError(item)
if isinstance(result, set) and len(result) == 1:
return tuple(result)[0]
return result
def __graph__(self):
"""
Graph server.
::
>>> import supriya
>>> server = supriya.Server.default().boot()
>>> group = supriya.Group([
... supriya.Synth(),
... supriya.Group([
... supriya.Synth(),
... supriya.Synth(),
... ]),
... ]).allocate()
::
>>> graph = server.__graph__()
>>> print(format(graph, 'graphviz'))
digraph G {
graph [bgcolor=transparent,
color=lightslategrey,
dpi=72,
fontname=Arial,
outputorder=edgesfirst,
overlap=prism,
penwidth=2,
rankdir=TB,
ranksep=0.5,
splines=spline,
style="dotted, rounded"];
node [fontname=Arial,
fontsize=12,
penwidth=2,
shape=Mrecord,
style="filled, rounded"];
edge [penwidth=2];
"root-node-0" [fillcolor=lightsalmon2,
label="{ <f_0_0> RootNode | <f_0_1> id: 0 }"];
"group-1" [fillcolor=lightsteelblue2,
label="{ <f_0_0> Group | <f_0_1> id: 1 }"];
"group-1000" [fillcolor=lightsteelblue2,
label="{ <f_0_0> Group | <f_0_1> id: 1000 }"];
"synth-1001" [fillcolor=lightgoldenrod2,
label="{ <f_0_0> Synth | <f_0_1> id: 1001 }"];
"group-1002" [fillcolor=lightsteelblue2,
label="{ <f_0_0> Group | <f_0_1> id: 1002 }"];
"synth-1003" [fillcolor=lightgoldenrod2,
label="{ <f_0_0> Synth | <f_0_1> id: 1003 }"];
"synth-1004" [fillcolor=lightgoldenrod2,
label="{ <f_0_0> Synth | <f_0_1> id: 1004 }"];
"root-node-0" -> "group-1";
"group-1" -> "group-1000";
"group-1000" -> "synth-1001";
"group-1000" -> "group-1002";
"group-1002" -> "synth-1003";
"group-1002" -> "synth-1004";
}
::
>>> supriya.graph(server) # doctest: +SKIP
"""
return self.root_node.__graph__()
def __repr__(self):
if not self.is_running:
return "<Server: offline>"
string = "<Server: {protocol}://{ip}:{port}, "
string += "{inputs}i{outputs}o>"
return string.format(
protocol=self.options.protocol,
ip=self.ip_address,
port=self.port,
inputs=self.options.input_bus_channel_count,
outputs=self.options.output_bus_channel_count,
)
def __str__(self):
if self.is_running:
return str(self.query_remote_nodes(True))
return ""
### PRIVATE METHODS ###
def _as_node_target(self):
return self.default_group
def _get_buffer_proxy(self, buffer_id):
import supriya.realtime
buffer_proxy = self._buffer_proxies.get(buffer_id)
if not buffer_proxy:
buffer_proxy = supriya.realtime.BufferProxy(
buffer_id=buffer_id, server=self
)
self._buffer_proxies[buffer_id] = buffer_proxy
return buffer_proxy
def _get_control_bus_proxy(self, bus_id):
import supriya.realtime
import supriya.synthdefs
control_bus_proxy = self._control_bus_proxies.get(bus_id)
if not control_bus_proxy:
control_bus_proxy = supriya.realtime.BusProxy(
bus_id=bus_id,
calculation_rate=supriya.CalculationRate.CONTROL,
server=self,
)
self._control_bus_proxies[bus_id] = control_bus_proxy
return control_bus_proxy
def _handle_buffer_info_response(self, response):
for item in response.items:
buffer_proxy = self._get_buffer_proxy(item.buffer_id)
if buffer_proxy:
buffer_proxy._handle_response(item)
def _handle_control_bus_set_response(self, response):
for item in response:
bus_id = item.bus_id
bus_proxy = self._get_control_bus_proxy(bus_id)
bus_proxy._value = item.bus_value
def _handle_control_bus_setn_response(self, response):
for item in response:
starting_bus_id = item.starting_bus_id
for i, value in enumerate(item.bus_values):
bus_id = starting_bus_id + i
bus_proxy = self._get_control_bus_proxy(bus_id)
bus_proxy._value = value
def _handle_node_info_response(self, response):
from supriya.realtime import Group, Synth
with self._lock:
node_id = response.node_id
node = self._nodes.get(node_id)
if node is not None:
node._handle_response(response)
elif response.action == NodeAction.NODE_CREATED:
if response.is_group:
node = Group()
else:
node = Synth()
node._register_with_local_server(server=self, node_id=response.node_id)
parent = self._nodes[response.parent_group_id]
node._set_parent(parent)
if response.previous_node_id:
previous_child = self._nodes[response.previous_node_id]
index = parent.index(previous_child)
parent._children.insert(index + 1, node)
else:
parent._children.append(node)
def _handle_synthdef_removed_response(self, response):
synthdef_name = response.synthdef_name
self._synthdefs.pop(synthdef_name, None)
def _setup_allocators(self):
self._audio_bus_allocator = BlockAllocator(
heap_maximum=self._options.audio_bus_channel_count,
heap_minimum=self._options.first_private_bus_id,
)
self._buffer_allocator = BlockAllocator(heap_maximum=self._options.buffer_count)
self._control_bus_allocator = BlockAllocator(
heap_maximum=self._options.control_bus_channel_count
)
self._node_id_allocator = NodeIdAllocator(
initial_node_id=self._options.initial_node_id, client_id=self.client_id
)
self._sync_id = self.client_id << 26
def _setup_notifications(self):
request = NotifyRequest(True)
response = request.communicate(server=self)
if isinstance(response, FailResponse):
self._shutdown()
raise supriya.exceptions.TooManyClients
self._client_id, self._maximum_logins = response.action[1], response.action[2]
def _setup_default_groups(self):
default_groups = [
supriya.Group(node_id_is_permanent=True) for _ in range(self.maximum_logins)
]
self.root_node.extend(default_groups)
self._default_group = default_groups[self.client_id]
def _setup_proxies(self):
import supriya.realtime
self._audio_input_bus_group = supriya.realtime.AudioInputBusGroup(self)
self._audio_output_bus_group = supriya.realtime.AudioOutputBusGroup(self)
self._root_node = supriya.realtime.RootNode(server=self)
self._nodes[0] = self._root_node
def _setup_osc_callbacks(self):
self._osc_io.register(
pattern="/b_info",
procedure=self._handle_buffer_info_response,
parse_response=True,
)
self._osc_io.register(
pattern="/c_set",
procedure=self._handle_control_bus_set_response,
parse_response=True,
)
self._osc_io.register(
pattern="/c_setn",
procedure=self._handle_control_bus_setn_response,
parse_response=True,
)
for pattern in (
"/n_end",
"/n_go",
"/n_info",
"/n_move",
"/n_off",
"/n_on",
"/n_set",
"/n_setn",
):
self._osc_io.register(
pattern=pattern,
procedure=self._handle_node_info_response,
parse_response=True,
)
self._osc_io.register(
pattern="/d_removed",
procedure=self._handle_synthdef_removed_response,
parse_response=True,
)
def failed(message):
logger.warning("Fail: {}".format(message))
self._osc_io.register(pattern="/fail", procedure=failed)
def _setup_status_watcher(self):
import supriya.realtime
self._status = None
self._status_watcher = supriya.realtime.StatusWatcher(self)
self._status_watcher.start()
def _setup_system_synthdefs(self, local_only=False):
import supriya.assets.synthdefs
import supriya.synthdefs
system_synthdefs = []
for name in dir(supriya.assets.synthdefs):
if not name.startswith("system_"):
continue
system_synthdef = getattr(supriya.assets.synthdefs, name)
if not isinstance(system_synthdef, supriya.synthdefs.SynthDef):
continue
system_synthdefs.append(system_synthdef)
if local_only:
for synthdef in system_synthdefs:
synthdef._register_with_local_server(self)
else:
supriya.synthdefs.SynthDef._allocate_synthdefs(system_synthdefs, self)
def _shutdown(self):
if not self.is_running:
return
elif self.is_owner:
self.quit()
else:
self.disconnect()
def _teardown_allocators(self):
self._audio_bus_allocator = None
self._buffer_allocator = None
self._control_bus_allocator = None
self._node_id_allocator = None
self._sync_id = 0
def _teardown_proxies(self):
for set_ in tuple(self._audio_buses.values()):
for x in tuple(set_):
x.free()
for set_ in tuple(self._buffers.values()):
for x in tuple(set_):
x.free()
for set_ in tuple(self._control_buses.values()):
for x in tuple(set_):
x.free()
for x in tuple(self._nodes.values()):
x.free()
self._audio_buses.clear()
self._audio_input_bus_group = None
self._audio_output_bus_group = None
self._buffers.clear()
self._buffer_proxies.clear()
self._control_buses.clear()
self._control_bus_proxies.clear()
self._default_group = None
self._nodes.clear()
self._root_node = None
self._synthdefs.clear()
def _teardown_status_watcher(self):
if self._status_watcher is not None:
self._status_watcher.is_active = False
self._status_watcher = None
self._status = None
### PUBLIC METHODS ###
def boot(self, scsynth_path=None, options=None, **kwargs):
if self.is_running:
return self
self._options = new(options or BootOptions(), **kwargs)
scsynth_path = BootOptions.find_scsynth(scsynth_path)
self._server_process = self._options.boot(scsynth_path, self.port)
self._is_owner = True
self._connect()
PubSub.notify("server-booted")
return self
def _connect(self):
self._is_running = True
self._osc_io.boot(ip_address=self.ip_address, port=self.port)
self._setup_osc_callbacks()
self._setup_status_watcher()
self._setup_notifications()
self._setup_allocators()
self._setup_proxies()
if self.client_id == 0:
self._setup_default_groups()
self._setup_system_synthdefs()
def _rehydrate(self):
from supriya.realtime import Group, Synth
def recurse(query_tree_node, node):
for query_tree_child in query_tree_node.children:
if isinstance(query_tree_child, QueryTreeGroup):
group = Group()
group._register_with_local_server(
node_id=query_tree_child.node_id, server=self
)
node._children.append(group)
recurse(query_tree_child, group)
elif isinstance(query_tree_child, QueryTreeSynth):
synth = Synth()
synth._register_with_local_server(
node_id=query_tree_child.node_id, server=self
)
node._children.append(synth)
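                    # NOTE: control values reported in the query tree are
                    # not yet re-applied to the rehydrated synth.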
for query_tree_control in query_tree_child.children:
pass
recurse(self.query_remote_nodes(include_controls=True), self.root_node)
def connect(self):
if self.is_running:
return
self._is_owner = False
self._connect()
if self.client_id > 0:
self._setup_system_synthdefs(local_only=True)
self._rehydrate()
self._default_group = self._nodes[self.client_id + 1]
return self
def disconnect(self, force=False):
if not self.is_running:
raise supriya.exceptions.ServerOffline
if self._is_owner and not force:
raise supriya.exceptions.OwnedServerShutdown(
"Cannot disconnect from owned server with force flag."
)
self._disconnect()
return self
def _disconnect(self):
self._is_running = False
self._is_owner = False
self._client_id = None
self._maximum_logins = None
self._osc_io.quit()
self._teardown_proxies()
self._teardown_allocators()
self._teardown_status_watcher()
def quit(self, force=False):
if not self.is_running:
return
if not self._is_owner and not force:
raise supriya.exceptions.UnownedServerShutdown(
"Cannot quit unowned server without force flag."
)
PubSub.notify("server-quitting")
if self.recorder.is_recording:
self.recorder.stop()
QuitRequest().communicate(server=self)
        if self._server_process is not None:
            self._server_process.terminate()
            self._server_process.wait()
self._disconnect()
PubSub.notify("server-quit")
return self
@classmethod
def default(cls):
if cls._default_server is None:
cls._default_server = Server()
return cls._default_server
@classmethod
def kill(cls, supernova=False):
BootOptions.kill(supernova=supernova)
def query_local_nodes(self, include_controls=False):
"""
Queries all node proxies in Python.
::
>>> import supriya.realtime
>>> server = supriya.Server.default()
>>> server.boot()
<Server: udp://127.0.0.1:57751, 8i8o>
::
>>> group_a = supriya.realtime.Group().allocate()
>>> group_b = supriya.realtime.Group().allocate()
>>> group_c = supriya.realtime.Group().allocate(target_node=group_a)
::
>>> import supriya.synthdefs
>>> import supriya.ugens
>>> with supriya.synthdefs.SynthDefBuilder(
... amplitude=0.0,
... frequency=440.0,
... ) as builder:
... sin_osc = supriya.ugens.SinOsc.ar(
... frequency=builder['frequency'],
... )
... sin_osc *= builder['amplitude']
... out = supriya.ugens.Out.ar(
... bus=0,
... source=[sin_osc, sin_osc],
... )
...
>>> synthdef = builder.build()
>>> synthdef.allocate()
<SynthDef: e41193ac8b7216f49ff0d477876a3bf3>
::
>>> synth = supriya.realtime.Synth(synthdef).allocate(
... target_node=group_b,
... )
::
>>> response = server.query_local_nodes(include_controls=True)
>>> print(response)
NODE TREE 0 group
1 group
1001 group
1003 e41193ac8b7216f49ff0d477876a3bf3
amplitude: 0.0, frequency: 440.0
1000 group
1002 group
::
>>> server.quit()
<Server: offline>
Returns server query-tree group response.
"""
query_tree_group = QueryTreeGroup.from_group(
self.root_node, include_controls=include_controls
)
return query_tree_group
def query_remote_nodes(self, include_controls=False):
"""
Queries all nodes on scsynth.
::
>>> import supriya.realtime
>>> server = supriya.Server.default()
>>> server.boot()
<Server: udp://127.0.0.1:57751, 8i8o>
::
>>> group_a = supriya.realtime.Group().allocate()
>>> group_b = supriya.realtime.Group().allocate()
>>> group_c = supriya.realtime.Group().allocate(target_node=group_a)
::
>>> import supriya.synthdefs
>>> import supriya.ugens
>>> with supriya.synthdefs.SynthDefBuilder(
... amplitude=0.0,
... frequency=440.0,
... ) as builder:
... sin_osc = supriya.ugens.SinOsc.ar(
... frequency=builder['frequency'],
... )
... sin_osc *= builder['amplitude']
... out = supriya.ugens.Out.ar(
... bus=0,
... source=[sin_osc, sin_osc],
... )
...
>>> synthdef = builder.build()
>>> synthdef.allocate()
<SynthDef: e41193ac8b7216f49ff0d477876a3bf3>
::
>>> synth = supriya.realtime.Synth(synthdef).allocate(
... target_node=group_b,
... )
::
>>> response = server.query_remote_nodes(include_controls=False)
>>> print(response)
NODE TREE 0 group
1 group
1001 group
1003 e41193ac8b7216f49ff0d477876a3bf3
1000 group
1002 group
::
>>> server.quit()
<Server: offline>
Returns server query-tree group response.
"""
request = GroupQueryTreeRequest(node_id=0, include_controls=include_controls)
response = request.communicate(server=self)
return response.query_tree_group
def reboot(self, options=None, **kwargs):
self.quit()
self.boot(options=options, **kwargs)
return self
def send_message(self, message, with_request_name=False):
if not message or not self.is_running:
return
self._osc_io.send(
message, with_request_name=with_request_name or self.debug_request_names
)
def sync(self, sync_id=None):
if not self.is_running:
return
if sync_id is None:
sync_id = self.next_sync_id
request = SyncRequest(sync_id=sync_id)
request.communicate(server=self)
return self
### PUBLIC PROPERTIES ###
@property
def audio_bus_allocator(self):
return self._audio_bus_allocator
@property
def audio_input_bus_group(self):
return self._audio_input_bus_group
@property
def audio_output_bus_group(self):
return self._audio_output_bus_group
@property
def buffer_allocator(self):
return self._buffer_allocator
@property
def client_id(self):
return self._client_id
@property
def control_bus_allocator(self):
return self._control_bus_allocator
@property
def debug_request_names(self):
return self._debug_request_names
@debug_request_names.setter
def debug_request_names(self, expr):
self._debug_request_names = bool(expr)
@property
def default_group(self):
return self._default_group
@property
def ip_address(self):
return self._ip_address
@property
def is_owner(self):
return self._is_owner
@property
def is_running(self):
return self._is_running
@property
def latency(self):
return self._latency
@latency.setter
def latency(self, latency):
self._latency = float(latency)
@property
def maximum_logins(self):
return self._maximum_logins
@property
def meters(self):
return self._meters
@property
def next_sync_id(self):
sync_id = self._sync_id
self._sync_id += 1
return sync_id
@property
def node_id_allocator(self):
return self._node_id_allocator
@property
def osc_io(self):
return self._osc_io
@property
def port(self):
return self._port
@property
def recorder(self):
return self._recorder
@property
def root_node(self):
return self._root_node
@property
def options(self):
return self._options
@property
def status(self):
return self._status
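# A usage sketch (an assumption based on the __enter__/__exit__ and
# boot()/quit() methods above): the server can be driven as a context
# manager, booting on entry and syncing and quitting on exit.
#
#     import supriya
#     with supriya.Server() as server:
#         synth = supriya.Synth().allocate()
#         server.sync()
#     # __exit__ has now called sync() and quit()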
# -*- coding: utf-8 -*-
import httplib as http
import os
from flask import send_from_directory
from framework import status
from framework import sentry
from framework.routing import Rule
from framework.flask import redirect
from framework.routing import WebRenderer
from framework.exceptions import HTTPError
from framework.auth import get_display_name
from framework.routing import xml_renderer
from framework.routing import json_renderer
from framework.routing import process_rules
from framework.auth import views as auth_views
from framework.routing import render_mako_string
from framework.auth.core import _get_current_user
from website import util
from website import settings
from website import language
from website.util import paths
from website.util import sanitize
from website import landing_pages as landing_page_views
from website import views as website_views
from website.citations import views as citation_views
from website.search import views as search_views
from website.oauth import views as oauth_views
from website.profile import views as profile_views
from website.project import views as project_views
from website.addons.base import views as addon_views
from website.discovery import views as discovery_views
from website.conferences import views as conference_views
from website.notifications import views as notification_views
def get_globals():
"""Context variables that are available for every template rendered by
OSFWebRenderer.
"""
user = _get_current_user()
return {
'user_name': user.username if user else '',
'user_full_name': user.fullname if user else '',
'user_id': user._primary_key if user else '',
'user_url': user.url if user else '',
'user_gravatar': profile_views.current_user_gravatar(size=25)['gravatar_url'] if user else '',
'user_api_url': user.api_url if user else '',
'display_name': get_display_name(user.fullname) if user else '',
'use_cdn': settings.USE_CDN_FOR_CLIENT_LIBS,
'piwik_host': settings.PIWIK_HOST,
'piwik_site_id': settings.PIWIK_SITE_ID,
'sentry_dsn_js': settings.SENTRY_DSN_JS if sentry.enabled else None,
'dev_mode': settings.DEV_MODE,
'allow_login': settings.ALLOW_LOGIN,
'cookie_name': settings.COOKIE_NAME,
'status': status.pop_status_messages(),
'domain': settings.DOMAIN,
'disk_saving_mode': settings.DISK_SAVING_MODE,
'language': language,
'web_url_for': util.web_url_for,
'api_url_for': util.api_url_for,
'sanitize': sanitize,
'js_str': lambda x: x.replace("'", r"\'").replace('"', r'\"'),
'webpack_asset': paths.webpack_asset,
'waterbutler_url': settings.WATERBUTLER_URL,
}
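# Illustrative consumption of get_globals() (a hypothetical Mako snippet;
# the variable names come from the dictionary above):
#
#     <a href="${user_url}">${user_full_name}</a>
#     ${web_url_for('dashboard')}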
class OsfWebRenderer(WebRenderer):
def __init__(self, *args, **kwargs):
kwargs['data'] = get_globals
super(OsfWebRenderer, self).__init__(*args, **kwargs)
#: Use if a view only redirects or raises error
notemplate = OsfWebRenderer('', render_mako_string)
# Static files (robots.txt, etc.)
def favicon():
return send_from_directory(
settings.STATIC_FOLDER,
'favicon.ico',
mimetype='image/vnd.microsoft.icon'
)
def robots():
"""Serves the robots.txt file."""
# Allow local robots.txt
if os.path.exists(os.path.join(settings.STATIC_FOLDER,
'robots.local.txt')):
robots_file = 'robots.local.txt'
else:
robots_file = 'robots.txt'
return send_from_directory(
settings.STATIC_FOLDER,
robots_file,
mimetype='text/plain'
)
def goodbye():
# Redirect to dashboard if logged in
if _get_current_user():
return redirect(util.web_url_for('dashboard'))
status.push_status_message(language.LOGOUT, 'info')
return {}
def make_url_map(app):
'''Set up all the routes for the OSF app.
:param app: A Flask/Werkzeug app to bind the rules to.
'''
# Set default views to 404, using URL-appropriate renderers
process_rules(app, [
Rule('/<path:_>', ['get', 'post'], HTTPError(http.NOT_FOUND),
OsfWebRenderer('', render_mako_string)),
Rule('/api/v1/<path:_>', ['get', 'post'],
HTTPError(http.NOT_FOUND), json_renderer),
])
### GUID ###
process_rules(app, [
Rule(
[
'/<guid>/',
'/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
OsfWebRenderer('', render_mako_string),
),
Rule(
[
'/api/v1/<guid>/',
'/api/v1/<guid>/<path:suffix>',
],
['get', 'post', 'put', 'patch', 'delete'],
website_views.resolve_guid,
json_renderer,
),
])
# Static files
process_rules(app, [
Rule('/favicon.ico', 'get', favicon, json_renderer),
Rule('/robots.txt', 'get', robots, json_renderer),
])
### Base ###
process_rules(app, [
Rule('/dashboard/', 'get', website_views.dashboard, OsfWebRenderer('dashboard.mako')),
Rule('/reproducibility/', 'get',
website_views.reproducibility, OsfWebRenderer('', render_mako_string)),
Rule('/about/', 'get', {}, OsfWebRenderer('public/pages/about.mako')),
Rule('/howosfworks/', 'get', {}, OsfWebRenderer('public/pages/howosfworks.mako')),
Rule('/faq/', 'get', {}, OsfWebRenderer('public/pages/faq.mako')),
Rule('/getting-started/', 'get', {}, OsfWebRenderer('public/pages/getting_started.mako')),
Rule('/explore/', 'get', {}, OsfWebRenderer('public/explore.mako')),
Rule(['/messages/', '/help/'], 'get', {}, OsfWebRenderer('public/comingsoon.mako')),
Rule(
'/view/<meeting>/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting.mako'),
),
Rule(
'/view/<meeting>/plain/',
'get',
conference_views.conference_results,
OsfWebRenderer('public/pages/meeting_plain.mako'),
endpoint_suffix='__plain',
),
Rule(
'/api/v1/view/<meeting>/',
'get',
conference_views.conference_data,
json_renderer,
),
Rule(
'/presentations/',
'get',
conference_views.conference_view,
OsfWebRenderer('public/pages/meeting_landing.mako'),
),
Rule('/news/', 'get', {}, OsfWebRenderer('public/pages/news.mako')),
])
# Site-wide API routes
process_rules(app, [
Rule(
'/citations/styles/',
'get',
citation_views.list_citation_styles,
json_renderer,
),
], prefix='/api/v1')
process_rules(app, [
Rule(
[
'/project/<pid>/<addon>/settings/disable/',
'/project/<pid>/node/<nid>/<addon>/settings/disable/',
],
'post',
addon_views.disable_addon,
json_renderer,
),
Rule(
'/profile/<uid>/<addon>/settings/',
'get',
addon_views.get_addon_user_config,
json_renderer,
),
], prefix='/api/v1')
# OAuth
process_rules(app, [
Rule(
'/oauth/connect/<service_name>/',
'get',
oauth_views.oauth_connect,
json_renderer,
),
Rule(
'/oauth/callback/<service_name>/',
'get',
oauth_views.oauth_callback,
OsfWebRenderer('util/oauth_complete.mako'),
),
])
process_rules(app, [
Rule(
[
'/oauth/accounts/<external_account_id>/',
],
'delete',
oauth_views.oauth_disconnect,
json_renderer,
)
], prefix='/api/v1')
process_rules(app, [
Rule('/dashboard/get_nodes/', 'get', website_views.get_dashboard_nodes, json_renderer),
Rule(
[
'/dashboard/<nid>',
'/dashboard/',
],
'get', website_views.get_dashboard, json_renderer),
], prefix='/api/v1')
### Meta-data ###
process_rules(app, [
Rule(
[
'/project/<pid>/comments/',
'/project/<pid>/node/<nid>/comments/',
],
'get',
project_views.comment.list_comments,
json_renderer,
),
Rule(
[
'/project/<pid>/comments/discussion/',
'/project/<pid>/node/<nid>/comments/discussion/',
],
'get',
project_views.comment.comment_discussion,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/',
'/project/<pid>/node/<nid>/comment/',
],
'post',
project_views.comment.add_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/',
'/project/<pid>/node/<nid>/comment/<cid>/',
],
'put',
project_views.comment.edit_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/',
'/project/<pid>/node/<nid>/comment/<cid>/',
],
'delete',
project_views.comment.delete_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/undelete/',
'/project/<pid>/node/<nid>/comment/<cid>/undelete/',
],
'put',
project_views.comment.undelete_comment,
json_renderer,
),
Rule(
[
'/project/<pid>/comments/timestamps/',
'/project/<pid>/node/<nid>/comments/timestamps/',
],
'put',
project_views.comment.update_comments_timestamp,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/report/',
'/project/<pid>/node/<nid>/comment/<cid>/report/',
],
'post',
project_views.comment.report_abuse,
json_renderer,
),
Rule(
[
'/project/<pid>/comment/<cid>/unreport/',
'/project/<pid>/node/<nid>/comment/<cid>/unreport/',
],
'post',
project_views.comment.unreport_abuse,
json_renderer,
),
Rule(
[
'/project/<pid>/citation/',
'/project/<pid>/node/<nid>/citation/',
],
'get',
citation_views.node_citation,
json_renderer,
),
], prefix='/api/v1')
### Forms ###
process_rules(app, [
Rule('/forms/registration/', 'get', website_views.registration_form, json_renderer),
Rule('/forms/signin/', 'get', website_views.signin_form, json_renderer),
Rule('/forms/forgot_password/', 'get', website_views.forgot_password_form, json_renderer),
Rule('/forms/reset_password/', 'get', website_views.reset_password_form, json_renderer),
], prefix='/api/v1')
### Discovery ###
process_rules(app, [
Rule('/explore/activity/', 'get', discovery_views.activity,
OsfWebRenderer('public/pages/active_nodes.mako')),
])
### Auth ###
# Web
process_rules(app, [
Rule(
'/confirm/<uid>/<token>/',
'get',
auth_views.confirm_email_get,
# View will either redirect or display error message
OsfWebRenderer('error.mako', render_mako_string)
),
Rule(
'/resend/',
['get', 'post'],
auth_views.resend_confirmation,
OsfWebRenderer('resend.mako', render_mako_string)
),
Rule(
'/resetpassword/<verification_key>/',
['get', 'post'],
auth_views.reset_password,
OsfWebRenderer('public/resetpassword.mako', render_mako_string)
),
# TODO: Remove `auth_register_post`
Rule('/register/', 'post', auth_views.auth_register_post,
OsfWebRenderer('public/login.mako')),
Rule('/api/v1/register/', 'post', auth_views.register_user, json_renderer),
Rule(['/login/', '/account/'], 'get',
auth_views.auth_login, OsfWebRenderer('public/login.mako')),
Rule('/login/', 'post', auth_views.auth_login,
OsfWebRenderer('public/login.mako'), endpoint_suffix='__post'),
Rule('/login/first/', 'get', auth_views.auth_login,
OsfWebRenderer('public/login.mako'),
endpoint_suffix='__first', view_kwargs={'first': True}),
Rule('/login/two-factor/', ['get', 'post'], auth_views.two_factor,
OsfWebRenderer('public/two_factor.mako')),
Rule('/logout/', 'get', auth_views.auth_logout, notemplate),
# TODO(hrybacki): combining the get/posts into a single rule is causing a build error and needs debugging
Rule('/forgotpassword/', 'get', auth_views._forgot_password,
OsfWebRenderer('public/forgot_password.mako')),
Rule('/forgotpassword/', 'post', auth_views.forgot_password,
OsfWebRenderer('public/login.mako')),
Rule([
'/midas/', '/summit/', '/accountbeta/', '/decline/'
], 'get', auth_views.auth_registerbeta, OsfWebRenderer('', render_mako_string)),
Rule('/login/connected_tools/',
'get',
landing_page_views.connected_tools,
OsfWebRenderer('public/login_landing.mako')),
Rule('/login/enriched_profile/',
'get',
landing_page_views.enriched_profile,
OsfWebRenderer('public/login_landing.mako')),
])
### Profile ###
# Web
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view, OsfWebRenderer('profile.mako')),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id,
OsfWebRenderer('profile.mako')),
Rule('/settings/key_history/<kid>/', 'get', profile_views.user_key_history,
OsfWebRenderer('profile/key_history.mako')),
Rule('/addons/', 'get', profile_views.profile_addons,
OsfWebRenderer('profile/addons.mako')),
Rule(["/user/merge/"], 'get', auth_views.merge_user_get,
OsfWebRenderer("merge_accounts.mako")),
Rule(["/user/merge/"], 'post', auth_views.merge_user_post,
OsfWebRenderer("merge_accounts.mako")),
# Route for claiming and setting email and password.
# Verification token must be querystring argument
Rule(['/user/<uid>/<pid>/claim/'], ['get', 'post'],
project_views.contributor.claim_user_form, OsfWebRenderer('claim_account.mako')),
Rule(['/user/<uid>/<pid>/claim/verify/<token>/'], ['get', 'post'],
project_views.contributor.claim_user_registered,
OsfWebRenderer('claim_account_registered.mako')),
Rule(
'/settings/',
'get',
profile_views.user_profile,
OsfWebRenderer('profile/settings.mako'),
),
Rule(
'/settings/account/',
'get',
profile_views.user_account,
OsfWebRenderer('profile/account.mako'),
),
Rule(
'/settings/account/password',
'post',
profile_views.user_account_password,
OsfWebRenderer('profile/account.mako'),
),
Rule(
'/settings/addons/',
'get',
profile_views.user_addons,
OsfWebRenderer('profile/addons.mako'),
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
OsfWebRenderer('profile/notifications.mako'),
),
])
# API
process_rules(app, [
Rule('/profile/', 'get', profile_views.profile_view, json_renderer),
Rule('/profile/', 'put', profile_views.update_user, json_renderer),
Rule('/profile/<uid>/', 'get', profile_views.profile_view_id, json_renderer),
# Used by profile.html
Rule('/profile/<uid>/edit/', 'post', profile_views.edit_profile, json_renderer),
Rule('/profile/<uid>/public_projects/', 'get',
profile_views.get_public_projects, json_renderer),
Rule('/profile/<uid>/public_components/', 'get',
profile_views.get_public_components, json_renderer),
Rule('/settings/keys/', 'get', profile_views.get_keys, json_renderer),
Rule('/settings/create_key/', 'post', profile_views.create_user_key, json_renderer),
Rule('/settings/revoke_key/', 'post', profile_views.revoke_user_key, json_renderer),
Rule('/settings/key_history/<kid>/', 'get', profile_views.user_key_history, json_renderer),
Rule('/profile/<user_id>/summary/', 'get',
profile_views.get_profile_summary, json_renderer),
Rule('/user/<uid>/<pid>/claim/email/', 'post',
project_views.contributor.claim_user_post, json_renderer),
Rule(
[
'/profile/gravatar/',
'/users/gravatar/',
'/profile/gravatar/<size>',
'/users/gravatar/<size>',
],
'get',
profile_views.current_user_gravatar,
json_renderer,
),
Rule(
[
'/profile/<uid>/gravatar/',
'/users/<uid>/gravatar/',
'/profile/<uid>/gravatar/<size>',
'/users/<uid>/gravatar/<size>',
],
'get',
profile_views.get_gravatar,
json_renderer,
),
# Rules for user profile configuration
Rule('/settings/names/', 'get', profile_views.serialize_names, json_renderer),
Rule('/settings/names/', 'put', profile_views.unserialize_names, json_renderer),
Rule('/settings/names/impute/', 'get', profile_views.impute_names, json_renderer),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'get',
profile_views.serialize_social,
json_renderer,
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'get',
profile_views.serialize_jobs,
json_renderer,
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'get',
profile_views.serialize_schools,
json_renderer,
),
Rule(
[
'/settings/social/',
'/settings/social/<uid>/',
],
'put',
profile_views.unserialize_social,
json_renderer
),
Rule(
[
'/settings/jobs/',
'/settings/jobs/<uid>/',
],
'put',
profile_views.unserialize_jobs,
json_renderer
),
Rule(
[
'/settings/schools/',
'/settings/schools/<uid>/',
],
'put',
profile_views.unserialize_schools,
json_renderer
),
], prefix='/api/v1',)
### Search ###
# Web
process_rules(app, [
Rule('/search/', 'get', {}, OsfWebRenderer('search.mako')),
Rule('/share/', 'get', {}, OsfWebRenderer('share_search.mako')),
Rule('/share_dashboard/', 'get', {}, OsfWebRenderer('share_dashboard.mako')),
Rule('/share/atom/', 'get', search_views.search_share_atom, xml_renderer),
Rule('/api/v1/user/search/', 'get', search_views.search_contributor, json_renderer),
Rule(
'/api/v1/search/node/',
'post',
project_views.node.search_node,
json_renderer,
),
])
# API
process_rules(app, [
Rule(['/search/', '/search/<type>/'], ['get', 'post'], search_views.search_search, json_renderer),
Rule('/search/projects/', 'get', search_views.search_projects_by_title, json_renderer),
Rule('/share/', ['get', 'post'], search_views.search_share, json_renderer),
Rule('/share/stats/', 'get', search_views.search_share_stats, json_renderer),
Rule('/share/providers/', 'get', search_views.search_share_providers, json_renderer),
], prefix='/api/v1')
# Project
# Web
process_rules(app, [
Rule('/', 'get', website_views.index, OsfWebRenderer('index.mako')),
Rule('/goodbye/', 'get', goodbye, OsfWebRenderer('index.mako')),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, OsfWebRenderer('project/project.mako')),
# Create a new subproject/component
Rule('/project/<pid>/newnode/', 'post', project_views.node.project_new_node,
OsfWebRenderer('', render_mako_string)),
Rule([
'/project/<pid>/key_history/<kid>/',
'/project/<pid>/node/<nid>/key_history/<kid>/',
], 'get', project_views.key.node_key_history, OsfWebRenderer('project/key_history.mako')),
# # TODO: Add API endpoint for tags
# Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, OsfWebRenderer('tags.mako')),
Rule('/folder/<nid>', 'get', project_views.node.folder_new,
OsfWebRenderer('project/new_folder.mako')),
Rule('/api/v1/folder/<nid>', 'post', project_views.node.folder_new_post, json_renderer),
Rule('/project/new/<pid>/beforeTemplate/', 'get',
project_views.node.project_before_template, json_renderer),
Rule(
[
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
],
'get',
project_views.node.node_contributors,
OsfWebRenderer('project/contributors.mako'),
),
Rule(
[
'/project/<pid>/settings/',
'/project/<pid>/node/<nid>/settings/',
],
'get',
project_views.node.node_setting,
OsfWebRenderer('project/settings.mako')
),
# Permissions
Rule(
[
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
],
'post',
project_views.node.project_set_privacy,
OsfWebRenderer('project/project.mako')
),
### Logs ###
# View forks
Rule([
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
], 'get', project_views.node.node_forks, OsfWebRenderer('project/forks.mako')),
# Registrations
Rule([
'/project/<pid>/register/',
'/project/<pid>/node/<nid>/register/',
], 'get', project_views.register.node_register_page,
OsfWebRenderer('project/register.mako')),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'get', project_views.register.node_register_template_page,
OsfWebRenderer('project/register.mako')),
Rule([
'/project/<pid>/registrations/',
'/project/<pid>/node/<nid>/registrations/',
], 'get', project_views.node.node_registrations,
OsfWebRenderer('project/registrations.mako')),
# Statistics
Rule([
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
], 'get', project_views.node.project_statistics,
OsfWebRenderer('project/statistics.mako')),
### Files ###
# Note: Web endpoint for files view must pass `mode` = `page` to
# include project view data and JS includes
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/',
],
'get',
project_views.file.collect_file_trees,
OsfWebRenderer('project/files.mako'),
view_kwargs={'mode': 'page'},
),
Rule(
[
'/project/<pid>/files/<provider>/<path:path>/',
'/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_view_or_download_file,
OsfWebRenderer('project/view_file.mako')
),
Rule(
[
# Legacy Addon view file paths
'/project/<pid>/<provider>/files/<path:path>/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/',
'/project/<pid>/<provider>/files/<path:path>/download/',
'/project/<pid>/node/<nid>/<provider>/files/<path:path>/download/',
# Legacy routes for `download_file`
'/project/<pid>/osffiles/<fid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/download/',
# Legacy routes for `view_file`
'/project/<pid>/osffiles/<fid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/download/<fid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/',
'/project/<pid>/files/<fid>/',
'/project/<pid>/node/<nid>/files/<fid>/',
'/project/<pid>/files/download/<fid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/',
# Legacy routes for `download_file_by_version`
'/project/<pid>/osffiles/<fid>/version/<vid>/download/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/download/',
# Note: Added these old URLs for backwards compatibility with
# hard-coded links.
'/project/<pid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/project/<pid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/osffiles/download/<fid>/version/<vid>/',
'/project/<pid>/files/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/<fid>/version/<vid>/',
'/project/<pid>/files/download/<fid>/version/<vid>/',
'/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
# api/v1 Legacy routes for `download_file`
'/api/v1/project/<pid>/osffiles/<fid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/',
'/api/v1/project/<pid>/files/download/<fid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/',
                # api/v1 Legacy routes for `download_file_by_version`
'/api/v1/project/<pid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/osffiles/<fid>/version/<vid>/',
'/api/v1/project/<pid>/files/download/<fid>/version/<vid>/',
'/api/v1/project/<pid>/node/<nid>/files/download/<fid>/version/<vid>/',
],
'get',
addon_views.addon_view_or_download_file_legacy,
json_renderer
),
])
# API
process_rules(app, [
Rule(
'/email/meeting/',
'post',
conference_views.meeting_hook,
json_renderer,
),
Rule('/mailchimp/hooks/', 'get', profile_views.mailchimp_get_endpoint, json_renderer),
Rule('/mailchimp/hooks/', 'post', profile_views.sync_data_from_mailchimp, json_renderer),
# Create project, used by projectCreator.js
Rule('/project/new/', 'post', project_views.node.project_new_post, json_renderer),
Rule([
'/project/<pid>/contributors_abbrev/',
'/project/<pid>/node/<nid>/contributors_abbrev/',
], 'get', project_views.contributor.get_node_contributors_abbrev, json_renderer),
Rule('/tags/<tag>/', 'get', project_views.tag.project_tag, json_renderer),
Rule([
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
], 'get', project_views.node.view_project, json_renderer),
Rule([
'/project/<pid>/expand/',
'/project/<pid>/node/<nid>/expand/',
], 'post', project_views.node.expand, json_renderer),
Rule([
'/project/<pid>/collapse/',
'/project/<pid>/node/<nid>/collapse/',
], 'post', project_views.node.collapse, json_renderer),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'get',
project_views.node.get_pointed,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
'/project/<pid>/node/<nid>/pointer/',
],
'post',
project_views.node.add_pointers,
json_renderer,
),
Rule(
[
'/pointer/',
],
'post',
project_views.node.add_pointer,
json_renderer,
),
Rule(
[
'/pointers/move/',
],
'post',
project_views.node.move_pointers,
json_renderer,
),
Rule(
[
'/project/<pid>/pointer/',
                '/project/<pid>/node/<nid>/pointer/',
],
'delete',
project_views.node.remove_pointer,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointer/<pointer_id>',
],
'delete',
project_views.node.remove_pointer_from_folder,
json_renderer,
),
Rule(
[
'/folder/<pid>/pointers/',
],
'delete',
project_views.node.remove_pointers_from_folder,
json_renderer,
),
Rule(
[
'/folder/<pid>',
],
'delete',
project_views.node.delete_folder,
json_renderer,
),
Rule('/folder/', 'put', project_views.node.add_folder, json_renderer),
Rule([
'/project/<pid>/get_summary/',
'/project/<pid>/node/<nid>/get_summary/',
], 'get', project_views.node.get_summary, json_renderer),
Rule([
'/project/<pid>/get_children/',
'/project/<pid>/node/<nid>/get_children/',
], 'get', project_views.node.get_children, json_renderer),
Rule([
'/project/<pid>/get_folder_pointers/'
], 'get', project_views.node.get_folder_pointers, json_renderer),
Rule([
'/project/<pid>/get_forks/',
'/project/<pid>/node/<nid>/get_forks/',
], 'get', project_views.node.get_forks, json_renderer),
Rule([
'/project/<pid>/get_registrations/',
'/project/<pid>/node/<nid>/get_registrations/',
], 'get', project_views.node.get_registrations, json_renderer),
Rule('/log/<log_id>/', 'get', project_views.log.get_log, json_renderer),
Rule([
'/project/<pid>/log/',
'/project/<pid>/node/<nid>/log/',
], 'get', project_views.log.get_logs, json_renderer),
Rule([
'/project/<pid>/get_contributors/',
'/project/<pid>/node/<nid>/get_contributors/',
], 'get', project_views.contributor.get_contributors, json_renderer),
Rule([
'/project/<pid>/get_contributors_from_parent/',
'/project/<pid>/node/<nid>/get_contributors_from_parent/',
], 'get', project_views.contributor.get_contributors_from_parent, json_renderer),
# Reorder contributors
Rule(
[
'/project/<pid>/contributors/manage/',
'/project/<pid>/node/<nid>/contributors/manage/',
],
            'post',
project_views.contributor.project_manage_contributors,
json_renderer,
),
Rule([
'/project/<pid>/get_most_in_common_contributors/',
'/project/<pid>/node/<nid>/get_most_in_common_contributors/',
], 'get', project_views.contributor.get_most_in_common_contributors, json_renderer),
Rule([
'/project/<pid>/get_recently_added_contributors/',
'/project/<pid>/node/<nid>/get_recently_added_contributors/',
], 'get', project_views.contributor.get_recently_added_contributors, json_renderer),
Rule([
'/project/<pid>/get_editable_children/',
'/project/<pid>/node/<nid>/get_editable_children/',
], 'get', project_views.node.get_editable_children, json_renderer),
# Private Link
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'post', project_views.node.project_generate_private_link_post, json_renderer),
Rule([
'/project/<pid>/private_link/edit/',
'/project/<pid>/node/<nid>/private_link/edit/',
], 'put', project_views.node.project_private_link_edit, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'delete', project_views.node.remove_private_link, json_renderer),
Rule([
'/project/<pid>/private_link/',
'/project/<pid>/node/<nid>/private_link/',
], 'get', project_views.node.private_link_table, json_renderer),
# Create, using existing project as a template
Rule([
'/project/new/<nid>/',
], 'post', project_views.node.project_new_from_template, json_renderer),
# Remove
Rule(
[
'/project/<pid>/',
'/project/<pid>/node/<nid>/',
],
'delete',
project_views.node.component_remove,
json_renderer,
),
# API keys
Rule([
'/project/<pid>/create_key/',
'/project/<pid>/node/<nid>/create_key/',
], 'post', project_views.key.create_node_key, json_renderer),
Rule([
'/project/<pid>/revoke_key/',
'/project/<pid>/node/<nid>/revoke_key/'
], 'post', project_views.key.revoke_node_key, json_renderer),
Rule([
'/project/<pid>/keys/',
'/project/<pid>/node/<nid>/keys/',
], 'get', project_views.key.get_node_keys, json_renderer),
# Reorder components
Rule('/project/<pid>/reorder_components/', 'post',
project_views.node.project_reorder_components, json_renderer),
# Edit node
Rule([
'/project/<pid>/edit/',
'/project/<pid>/node/<nid>/edit/',
], 'post', project_views.node.edit_node, json_renderer),
# Tags
Rule([
'/project/<pid>/addtag/<tag>/',
'/project/<pid>/node/<nid>/addtag/<tag>/',
], 'post', project_views.tag.project_addtag, json_renderer),
Rule([
'/project/<pid>/removetag/<tag>/',
'/project/<pid>/node/<nid>/removetag/<tag>/',
], 'post', project_views.tag.project_removetag, json_renderer),
# Add / remove contributors
Rule([
'/project/<pid>/contributors/',
'/project/<pid>/node/<nid>/contributors/',
], 'post', project_views.contributor.project_contributors_post, json_renderer),
Rule([
'/project/<pid>/beforeremovecontributors/',
'/project/<pid>/node/<nid>/beforeremovecontributors/',
], 'post', project_views.contributor.project_before_remove_contributor, json_renderer),
# TODO(sloria): should be a delete request to /contributors/
Rule([
'/project/<pid>/removecontributors/',
'/project/<pid>/node/<nid>/removecontributors/',
], 'post', project_views.contributor.project_removecontributor, json_renderer),
# Forks
Rule(
[
'/project/<pid>/fork/before/',
'/project/<pid>/node/<nid>/fork/before/',
], 'get', project_views.node.project_before_fork, json_renderer,
),
Rule(
[
'/project/<pid>/fork/',
'/project/<pid>/node/<nid>/fork/',
], 'post', project_views.node.node_fork_page, json_renderer,
),
Rule(
[
'/project/<pid>/pointer/fork/',
'/project/<pid>/node/<nid>/pointer/fork/',
], 'post', project_views.node.fork_pointer, json_renderer,
),
# View forks
Rule([
'/project/<pid>/forks/',
'/project/<pid>/node/<nid>/forks/',
], 'get', project_views.node.node_forks, json_renderer),
# Registrations
Rule([
'/project/<pid>/beforeregister/',
            '/project/<pid>/node/<nid>/beforeregister/',
], 'get', project_views.register.project_before_register, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'get', project_views.register.node_register_template_page, json_renderer),
Rule([
'/project/<pid>/register/<template>/',
'/project/<pid>/node/<nid>/register/<template>/',
], 'post', project_views.register.node_register_template_page_post, json_renderer),
# Statistics
Rule([
'/project/<pid>/statistics/',
'/project/<pid>/node/<nid>/statistics/',
], 'get', project_views.node.project_statistics, json_renderer),
# Permissions
Rule([
'/project/<pid>/permissions/<permissions>/',
'/project/<pid>/node/<nid>/permissions/<permissions>/',
], 'post', project_views.node.project_set_privacy, json_renderer),
Rule([
'/project/<pid>/permissions/beforepublic/',
'/project/<pid>/node/<nid>/permissions/beforepublic/',
], 'get', project_views.node.project_before_set_public, json_renderer),
### Wiki ###
### Watching ###
Rule([
'/project/<pid>/watch/',
'/project/<pid>/node/<nid>/watch/'
], 'post', project_views.node.watch_post, json_renderer),
Rule([
'/project/<pid>/unwatch/',
'/project/<pid>/node/<nid>/unwatch/'
], 'post', project_views.node.unwatch_post, json_renderer),
Rule([
'/project/<pid>/togglewatch/',
'/project/<pid>/node/<nid>/togglewatch/'
], 'post', project_views.node.togglewatch_post, json_renderer),
Rule([
'/watched/logs/'
], 'get', website_views.watched_logs_get, json_renderer),
### Accounts ###
Rule([
'/user/merge/'
], 'post', auth_views.merge_user_post, json_renderer),
# Combined files
Rule(
[
'/project/<pid>/files/',
'/project/<pid>/node/<nid>/files/'
],
'get',
project_views.file.collect_file_trees,
json_renderer,
),
# Endpoint to fetch Rubeus.JS/Hgrid-formatted data
Rule(
[
'/project/<pid>/files/grid/',
'/project/<pid>/node/<nid>/files/grid/'
],
'get',
project_views.file.grid_data,
json_renderer
),
# Settings
Rule(
'/files/auth/',
'get',
addon_views.get_auth,
json_renderer,
),
Rule(
[
'/project/<pid>/waterbutler/logs/',
'/project/<pid>/node/<nid>/waterbutler/logs/',
],
'put',
addon_views.create_waterbutler_log,
json_renderer,
),
Rule(
[
'/project/<pid>/files/<provider>/<path:path>/',
'/project/<pid>/node/<nid>/files/<provider>/<path:path>/',
],
'get',
addon_views.addon_render_file,
json_renderer
),
Rule(
'/settings/addons/',
'post',
profile_views.user_choose_addons,
json_renderer,
),
Rule(
'/settings/notifications/',
'get',
profile_views.user_notifications,
json_renderer,
),
Rule(
'/settings/notifications/',
'post',
profile_views.user_choose_mailing_lists,
json_renderer,
),
Rule(
'/subscriptions/',
'get',
notification_views.get_subscriptions,
json_renderer,
),
Rule(
[
'/project/<pid>/subscriptions/',
'/project/<pid>/node/<nid>/subscriptions/'
],
'get',
notification_views.get_node_subscriptions,
json_renderer,
),
Rule(
'/subscriptions/',
'post',
notification_views.configure_subscription,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/addons/',
'/project/<pid>/node/<nid>/settings/addons/',
],
'post',
project_views.node.node_choose_addons,
json_renderer,
),
Rule(
[
'/project/<pid>/settings/comments/',
'/project/<pid>/node/<nid>/settings/comments/',
],
'post',
project_views.node.configure_comments,
json_renderer,
),
# Invite Users
Rule(
[
'/project/<pid>/invite_contributor/',
'/project/<pid>/node/<nid>/invite_contributor/'
],
'post',
project_views.contributor.invite_contributor_post,
json_renderer
),
], prefix='/api/v1')
# Set up static routing for addons
# NOTE: We use nginx to serve static addon assets in production
addon_base_path = os.path.abspath('website/addons')
if settings.DEV_MODE:
@app.route('/static/addons/<addon>/<path:filename>')
def addon_static(addon, filename):
addon_path = os.path.join(addon_base_path, addon, 'static')
return send_from_directory(addon_path, filename)
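# A minimal wiring sketch (assumed entry point; the real OSF app factory
# lives elsewhere in the codebase):
#
#     from flask import Flask
#     app = Flask(__name__)
#     make_url_map(app)  # binds all web and /api/v1 rules onto the app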
'''
An implementation for creating hierarchical word clusters based on syntactic
context ("Brown clusters"). This is based on the following papers.
* Peter F. Brown; Peter V. deSouza; Robert L. Mercer; T. J. Watson; Vincent J.
Della Pietra; Jenifer C. Lai. 1992. Class-Based n-gram Models of Natural
Language. Computational Linguistics, Volume 18, Number 4.
http://acl.ldc.upenn.edu/J/J92/J92-4003.pdf
* Percy Liang. 2005. Semi-supervised learning for natural language. MIT.
http://cs.stanford.edu/~pliang/papers/meng-thesis.pdf
Some additional references:
* See http://www.cs.columbia.edu/~cs4705/lectures/brown.pdf for a high-level
overview of Brown clustering.
* Here is another implementation of Brown clustering:
https://github.com/percyliang/brown-cluster
NOTE: I am not very confident that this particular class is working properly.
Although it seems to do reasonable things, I haven't tested it against the
implementation from Percy Liang (linked above) to see how the outputs differ.
Also, if you don't mind the research-only license of Liang's C++
implementation, that may be preferable, since it is probably faster.
Author: Michael Heilman (mheilman@ets.org, mheilman@cs.cmu.edu)
'''
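# Sketch of the merge criterion implemented below: the weight of the edge
# between clusters c and c' (see compute_weight) is the average-mutual-
# information contribution
#
#     w(c, c') = p(c, c') * log(p(c, c') / (p(c) * p(c')))
#
# where p(c, c') = trans[c][c'] / num_tokens and p(c) = counts[c] / num_tokens.
# compute_L(c1, c2) scores how the total edge weight would change if c1 and
# c2 were merged, and find_best() greedily picks the highest-scoring pair.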
import random
import argparse
import glob
import re
import itertools
import logging
from collections import defaultdict
from math import log, isnan, isinf
from bs4 import UnicodeDammit
import os
import sys
import pymongo
from pymongo import MongoClient
path_utils = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(path_utils)
from utils_new import getData, uploadObject, permanentFilter, assignFilterTag, filter_names
random.seed(42)
logging.basicConfig(level=logging.INFO, format='%(asctime)s\t\t%(message)s')
#######################################################
START = '_START_'
END = '_END_'
START_TAG = '<START>'
END_TAG = '<STOP>'
RARE = "<RARE>"
BR = "**"
NO_LABELS = [
START_TAG,
END_TAG,
BR,
RARE,
]
#######################################################
###########
# MONGODB CONNECTION
#client = MongoClient()
#db_pointer = client.JobDB.core_tokenized
##########
"""
def read_corpus(path):
corpus = ""
with open(path) as f:
corpus = re.split(r'\s+', f.read().strip())
return corpus
"""
def make_float_defaultdict():
return defaultdict(float)
def make_int_defaultdict():
return defaultdict(int)
class ClassLMClusters(object):
    '''
    Hierarchical word clusterer. The initializer takes a dataset object
    (not a raw token iterator); see corpus_generator below for the
    interface the dataset is expected to expose.
    '''
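    # Expected dataset interface (inferred from corpus_generator below and
    # stated as an assumption): data.seq_list is an iterable of sequence
    # objects, each with integer token ids in doc.x and a label dictionary
    # reachable via doc.sequence_list.x_dict.get_label_name(token_id).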
def __init__(self, data, batch_size=1000, max_vocab_size=None,
lower=False):
self.batch_size = batch_size
self.dataset = data
self.lower = lower # whether to lowercase everything
self.max_vocab_size = max_vocab_size
# mapping from cluster IDs to cluster IDs,
# to keep track of the hierarchy
self.cluster_parents = {}
self.cluster_counter = 0
# the list of words in the vocabulary and their counts
self.counts = defaultdict(int)
self.trans = defaultdict(make_int_defaultdict)
self.num_tokens = 0
# the graph weights (w) and the effects of merging nodes (L)
# (see Liang's thesis)
self.w = defaultdict(make_float_defaultdict)
self.L = defaultdict(make_float_defaultdict)
# the 0/1 bit to add when walking up the hierarchy
# from a word to the top-level cluster
self.cluster_bits = {}
# load filtered lexicon
self.word_reference = uploadObject(os.path.join(path_utils,'word_dict_filtered')) # word dict from all data (200k)
self.stem_reference = uploadObject(os.path.join(path_utils,'stem_dict'))
# find the most frequent words
self.vocab = {}
self.reverse_vocab = []
self.create_vocab()
# create sets of documents that each word appears in
self.create_index()
# make a copy of the list of words, as a queue for making new clusters
word_queue = list(range(len(self.vocab)))
# score potential clusters, starting with the most frequent words.
# also, remove the batch from the queue
self.current_batch = word_queue[:(self.batch_size + 1)]
word_queue = word_queue[(self.batch_size + 1):]
self.initialize_tables()
while len(self.current_batch) > 1:
# find the best pair of words/clusters to merge
c1, c2 = self.find_best()
# merge the clusters in the index
self.merge(c1, c2)
if word_queue:
new_word = word_queue.pop(0)
self.add_to_batch(new_word)
logging.info('{} AND {} WERE MERGED INTO {}. {} REMAIN.'
.format(self.reverse_vocab[c1] if c1 < len(self.reverse_vocab) else c1,
self.reverse_vocab[c2] if c2 < len(self.reverse_vocab) else c2,
self.cluster_counter,
len(self.current_batch) + len(word_queue) - 1))
self.cluster_counter += 1
def corpus_generator(self):
for doc in self.dataset.seq_list:
for i in range(len(doc.x)):
token = doc.sequence_list.x_dict.get_label_name(doc.x[i])
if token in NO_LABELS:
yield token
else:
tok = permanentFilter(token)
if tok not in filter_names and tok.lower() not in self.word_reference:
tok = assignFilterTag(tok.lower())
if self.lower and tok == token:
tok = tok.lower()
yield tok
"""
def corpus_generator(self): # from DB
for doc in db_pointer:
tokens = doc.get('tokens',[])
for sent in tokens:
for word in sent:
if self.lower:
word = word.lower()
yield word
"""
def create_index(self):
corpus_iter1, corpus_iter2 = itertools.tee(self.corpus_generator())
# increment one iterator to get consecutive tokens
next(corpus_iter2)
for w1, w2 in zip(corpus_iter1, corpus_iter2):
if w1 in self.vocab and w2 in self.vocab:
self.trans[self.vocab[w1]][self.vocab[w2]] += 1
logging.info('{} word tokens were processed.'.format(self.num_tokens))
def create_vocab(self):
tmp_counts = defaultdict(int)
for w in self.corpus_generator():
tmp_counts[w] += 1
self.num_tokens += 1
words = sorted(tmp_counts.keys(), key=lambda w: tmp_counts[w],
reverse=True)
too_rare = 0
if self.max_vocab_size is not None \
and len(words) > self.max_vocab_size:
too_rare = tmp_counts[words[self.max_vocab_size]]
if too_rare == tmp_counts[words[0]]:
too_rare += 1
logging.info("max_vocab_size too low. Using all words that" +
" appeared > {} times.".format(too_rare))
for i, w in enumerate(w for w in words if tmp_counts[w] > too_rare):
self.vocab[w] = i
self.counts[self.vocab[w]] = tmp_counts[w]
self.reverse_vocab = sorted(self.vocab.keys(),
key=lambda w: self.vocab[w])
self.cluster_counter = len(self.vocab)
def initialize_tables(self):
logging.info("initializing tables")
# edges between nodes
for c1, c2 in itertools.combinations(self.current_batch, 2):
w = self.compute_weight([c1], [c2]) \
+ self.compute_weight([c2], [c1])
if w:
self.w[c1][c2] = w
# edges to and from a single node
for c in self.current_batch:
w = self.compute_weight([c], [c])
if w:
self.w[c][c] = w
num_pairs = 0
for c1, c2 in itertools.combinations(self.current_batch, 2):
self.compute_L(c1, c2)
num_pairs += 1
if num_pairs % 1000 == 0:
logging.info("{} pairs precomputed".format(num_pairs))
def compute_weight(self, nodes1, nodes2):
paircount = 0
for n1 in nodes1:
for n2 in nodes2:
paircount += self.trans[n1][n2]
        if not paircount:
            # TODO: is there a better option than returning 0.0 (no weight)?
            # Without this guard, the formula below would evaluate
            # 0 * log(0) = 0 * -inf = nan.
            return 0.0
count_1 = 0
count_2 = 0
for n in nodes1:
count_1 += self.counts[n]
for n in nodes2:
count_2 += self.counts[n]
# convert to floats
num_tokens = float(self.num_tokens)
paircount = float(paircount)
count_1 = float(count_1)
count_2 = float(count_2)
return (paircount / num_tokens) \
* log(paircount * num_tokens / count_1 / count_2)
def compute_L(self, c1, c2):
val = 0.0
# add the weight of edges coming in to the potential
# new cluster from other nodes
# TODO this is slow
for d in self.current_batch:
val += self.compute_weight([c1, c2], [d])
val += self.compute_weight([d], [c1, c2])
# ... but don't include what will be part of the new cluster
for d in [c1, c2]:
val -= self.compute_weight([c1, c2], [d])
val -= self.compute_weight([d], [c1, c2])
# add the weight of the edge from the potential new cluster
# to itself
val += self.compute_weight([c1, c2], [c1, c2])
# subtract the weight of edges to/from c1, c2
# (which would be removed)
for d in self.current_batch:
for c in [c1, c2]:
if d in self.w[c]:
val -= self.w[c][d]
elif c in self.w[d]:
val -= self.w[d][c]
self.L[c1][c2] = val
def find_best(self):
best_score = float('-inf')
argmax = None
for c1 in self.L:
for c2, score in self.L[c1].items():
if score > best_score:
argmax = [(c1, c2)]
best_score = score
elif score == best_score:
argmax.append((c1, c2))
if isnan(best_score) or isinf(best_score):
raise ValueError("bad value for score: {}".format(best_score))
# break ties randomly (randint takes inclusive args!)
c1, c2 = argmax[random.randint(0, len(argmax) - 1)]
return c1, c2
def merge(self, c1, c2):
c_new = self.cluster_counter
# record parents
self.cluster_parents[c1] = c_new
self.cluster_parents[c2] = c_new
r = random.randint(0, 1)
self.cluster_bits[c1] = str(r) # assign bits randomly
self.cluster_bits[c2] = str(1 - r)
# add the new cluster to the counts and transitions dictionaries
self.counts[c_new] = self.counts[c1] + self.counts[c2]
for c in [c1, c2]:
for d, val in self.trans[c].items():
if d == c1 or d == c2:
d = c_new
self.trans[c_new][d] += val
# subtract the weights for the merged nodes from the score table
# TODO this is slow
for c in [c1, c2]:
for d1 in self.L:
for d2 in self.L[d1]:
self.L[d1][d2] -= self.compute_weight([d1, d2], [c])
self.L[d1][d2] -= self.compute_weight([c], [d1, d2])
# remove merged clusters from the counts and transitions dictionaries
# to save memory (but keep frequencies for words for the final output)
if c1 >= len(self.vocab):
del self.counts[c1]
if c2 >= len(self.vocab):
del self.counts[c2]
del self.trans[c1]
del self.trans[c2]
for d in self.trans:
for c in [c1, c2]:
if c in self.trans[d]:
del self.trans[d][c]
# remove the old clusters from the w and L tables
for table in [self.w, self.L]:
for d in table:
if c1 in table[d]:
del table[d][c1]
if c2 in table[d]:
del table[d][c2]
if c1 in table:
del table[c1]
if c2 in table:
del table[c2]
# remove the merged items
self.current_batch.remove(c1)
self.current_batch.remove(c2)
# add the new cluster to the w and L tables
self.add_to_batch(c_new)
def add_to_batch(self, c_new):
# compute weights for edges connected to the new node
for d in self.current_batch:
            self.w[d][c_new] = self.compute_weight([d], [c_new]) \
                + self.compute_weight([c_new], [d])
self.w[c_new][c_new] = self.compute_weight([c_new], [c_new])
# add the weights from this new node to the merge score table
# TODO this is slow
for d1 in self.L:
for d2 in self.L[d1]:
self.L[d1][d2] += self.compute_weight([d1, d2], [c_new])
self.L[d1][d2] += self.compute_weight([c_new], [d1, d2])
# compute scores for merging it with all clusters in the current batch
for d in self.current_batch:
self.compute_L(d, c_new)
# now add it to the batch
self.current_batch.append(c_new)
def get_bitstring(self, w):
# walk up the cluster hierarchy until there is no parent cluster
cur_cluster = self.vocab[w]
bitstring = ""
while cur_cluster in self.cluster_parents:
bitstring = self.cluster_bits[cur_cluster] + bitstring
cur_cluster = self.cluster_parents[cur_cluster]
return bitstring
def save_clusters(self, output_path):
with open(output_path, 'w') as f:
for w in self.vocab:
# convert the counts back to ints when printing
f.write("{}\t{}\t{}\n".format(w, self.get_bitstring(w),
self.counts[self.vocab[w]]))
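# A minimal standalone sketch (illustrative only, not used by the class above)
# of the mutual-information edge weight that compute_weight implements:
#     p(c1, c2) * log( p(c1, c2) / (p(c1) * p(c2)) )
# with the probabilities estimated from pair and cluster counts over the corpus.
def _mi_weight_example(pair_count, count_1, count_2, num_tokens):
    from math import log  # already imported at module level; repeated for self-containment
    if not pair_count:
        return 0.0  # mirror compute_weight's guard against log(0)
    return (pair_count / float(num_tokens)) \
        * log(pair_count * float(num_tokens) / count_1 / count_2)
# e.g. _mi_weight_example(4, 10, 8, 100) == 0.04 * log(5.0)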
def main():
parser = argparse.ArgumentParser(description='Create hierarchical word' +
' clusters from a corpus, following' +
' Brown et al. (1992).')
parser.add_argument('input_path', help='input file, ' +
'with tokens whitespace separated')
parser.add_argument('output_path', help='output path')
parser.add_argument('--max_vocab_size', help='maximum number of words in' +
' the vocabulary (a smaller number will be used if' +
' there are ties at the specified level)',
default=None, type=int)
parser.add_argument('--batch_size', help='number of clusters to merge at' +
' one time (runtime is quadratic in this value)',
default=1000, type=int)
parser.add_argument('--lower', help='lowercase the input',
action='store_true')
args = parser.parse_args()
c = ClassLMClusters(args.input_path,
max_vocab_size=args.max_vocab_size,
batch_size=args.batch_size, lower=args.lower)
c.save_clusters(args.output_path)
if __name__ == '__main__':
print("Reading corpus...")
#train,_,_ = getData(test = 0, val = 0, START_END_TAGS=False, extended_filter=False)
train,_,_ = getData(START_END_TAGS=False)
print("Clustering...")
clustering = ClassLMClusters(data = train, lower = True)
print("Saving clusters...")
clustering.save_clusters("output")
| |
"""
@package mi.instrument.noaa.driver
@file marine-integrations/mi/instrument/noaa/particles.py
@author Pete Cable
@brief Particles for BOTPT
Release notes:
"""
import re
import time
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle, DataParticleKey, CommonDataParticleType
from mi.core.exceptions import SampleException
from mi.core.log import get_logging_metaclass
from mi.core.time_tools import timegm_to_float
__author__ = 'Pete Cable'
__license__ = 'Apache 2.0'
METALOGGER = get_logging_metaclass('trace')
NEWLINE = '\n'
common_regex_items = {
'float': r'\s*-?\d*\.\d*\s*',
'int': r'\s*-?\d+\s*',
'date_time': r'\s*\d{4}/\d{2}/\d{2}\s\d{2}:\d{2}:\d{2}\.?\d*\s*',
'word': r'\s*\S+\s*',
'newline': NEWLINE
}
class DataParticleType(BaseEnum):
RAW = CommonDataParticleType.RAW
LILY_SAMPLE = 'botpt_lily_sample'
LILY_LEVELING = 'botpt_lily_leveling'
IRIS_SAMPLE = 'botpt_iris_sample'
NANO_SAMPLE = 'botpt_nano_sample'
HEAT_SAMPLE = 'botpt_heat_sample'
BOTPT_STATUS = 'botpt_status'
class IrisSampleParticleKey(BaseEnum):
SENSOR_ID = "sensor_id"
TIME = "date_time_string"
X_TILT = "iris_x_tilt"
Y_TILT = "iris_y_tilt"
TEMP = "iris_temp"
SN = "serial_number"
class HeatSampleParticleKey(BaseEnum):
SENSOR_ID = 'sensor_id'
TIME = "date_time_string"
X_TILT = "heat_x_tilt"
Y_TILT = "heat_y_tilt"
TEMP = "heat_temp"
class LilySampleParticleKey(BaseEnum):
SENSOR_ID = 'sensor_id'
TIME = "date_time_string"
X_TILT = "lily_x_tilt"
Y_TILT = "lily_y_tilt"
MAG_COMPASS = "compass_direction"
TEMP = "lily_temp"
SUPPLY_VOLTS = "supply_voltage"
SN = "serial_number"
class LilyLevelingParticleKey(BaseEnum):
SENSOR_ID = "sensor_id"
TIME = "date_time_string"
X_TILT = "lily_x_tilt"
Y_TILT = "lily_y_tilt"
MAG_COMPASS = "compass_direction"
TEMP = "lily_temp"
SUPPLY_VOLTS = "supply_voltage"
SN = "serial_number"
STATUS = "lily_leveling_status"
class NanoSampleParticleKey(BaseEnum):
SENSOR_ID = 'sensor_id'
TIME = "date_time_string"
PPS_SYNC = "time_sync_flag"
PRESSURE = "bottom_pressure"
TEMP = "press_trans_temp"
class BotptStatusParticleKey(BaseEnum):
IRIS1 = 'botpt_iris_status_01'
IRIS2 = 'botpt_iris_status_02'
LILY1 = 'botpt_lily_status_01'
LILY2 = 'botpt_lily_status_02'
NANO = 'botpt_nano_status'
SYST = 'botpt_syst_status'
class BotptDataParticle(DataParticle):
_compiled_regex = None
_compile_flags = None
__metaclass__ = METALOGGER
def __init__(self, *args, **kwargs):
"""
Initialize the BotptDataParticle base class.
perform the regex match, raise exception if no match found
@throws SampleException
"""
super(BotptDataParticle, self).__init__(*args, **kwargs)
self.contents[DataParticleKey.PREFERRED_TIMESTAMP] = DataParticleKey.INTERNAL_TIMESTAMP
self.match = self.regex_compiled().match(self.raw_data)
if not self.match:
raise SampleException("No regex match of parsed sample data: [%r]" % self.raw_data)
@staticmethod
def regex():
raise NotImplementedError()
@classmethod
def regex_compiled(cls):
"""
Compile the regex, caching the result for future calls
@return: compiled regex
"""
if cls._compiled_regex is None:
if cls._compile_flags is None:
cls._compiled_regex = re.compile(cls.regex())
else:
cls._compiled_regex = re.compile(cls.regex(), cls._compile_flags)
return cls._compiled_regex
def set_botpt_timestamp(self):
"""
Set the internal timestamp based on the embedded timestamp in the sample
"""
ts = self.match.group('date_time')
if '.' in ts:
ts, right = ts.split('.', 1)
fraction = float('.' + right)
else:
fraction = 0
timestamp = time.strptime(ts, "%Y/%m/%d %H:%M:%S")
self.set_internal_timestamp(unix_time=timegm_to_float(timestamp) + fraction)
def _encode_all(self):
"""
Default implementation, return empty list
@return: list of encoded values
"""
return []
def _build_parsed_values(self):
"""
@throws SampleException If there is a problem with sample creation
"""
try:
self.set_botpt_timestamp()
result = self._encode_all()
except Exception as e:
raise SampleException("Exception [%s] while converting data: [%s]" % (e, self.raw_data))
return result
@staticmethod
def _filter(filter_string):
"""
Generate a filter function based on the supplied filter string
@param filter_string
@return: filter function
"""
def inner(data):
return NEWLINE.join(line for line in data.split(NEWLINE) if line.startswith(filter_string))
return inner
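def _example_filter():
    """
    Illustrative sketch (not part of the driver): the function returned by
    _filter('IRIS') keeps only the lines that begin with 'IRIS'.
    """
    keep_iris = BotptDataParticle._filter('IRIS')
    # keep_iris('IRIS,a\nHEAT,b\nIRIS,c') -> 'IRIS,a\nIRIS,c'
    return keep_iris(NEWLINE.join(['IRIS,a', 'HEAT,b', 'IRIS,c']))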
class IrisSampleParticle(BotptDataParticle):
_data_particle_type = DataParticleType.IRIS_SAMPLE
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
IRIS,2013/05/29 00:25:36, -0.0885, -0.7517,28.49,N8642
@return: regex string
"""
pattern = r'''
(?x) # verbose
IRIS,
(?P<date_time> %(date_time)s ),
(?P<x_tilt> %(float)s ),
(?P<y_tilt> %(float)s ),
(?P<temp> %(float)s ),
(?P<serial> %(word)s )
''' % common_regex_items
return pattern
def _encode_all(self):
return [
self._encode_value(IrisSampleParticleKey.SENSOR_ID, 'IRIS', str),
self._encode_value(IrisSampleParticleKey.TIME, self.match.group('date_time'), str),
self._encode_value(IrisSampleParticleKey.X_TILT, self.match.group('x_tilt'), float),
self._encode_value(IrisSampleParticleKey.Y_TILT, self.match.group('y_tilt'), float),
self._encode_value(IrisSampleParticleKey.TEMP, self.match.group('temp'), float),
self._encode_value(IrisSampleParticleKey.SN, self.match.group('serial').strip(), str)
]
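def _example_iris_match():
    """
    Illustrative sketch: show how the named groups in IrisSampleParticle.regex()
    pick apart the sample line quoted in its docstring.
    """
    sample = 'IRIS,2013/05/29 00:25:36, -0.0885, -0.7517,28.49,N8642'
    match = IrisSampleParticle.regex_compiled().match(sample)
    # e.g. match.group('x_tilt') is ' -0.0885'; strip()/float() remove the padding
    return dict((key, match.group(key).strip())
                for key in ('date_time', 'x_tilt', 'y_tilt', 'temp', 'serial'))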
class HeatSampleParticle(BotptDataParticle):
_data_particle_type = DataParticleType.HEAT_SAMPLE
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
HEAT,2013/04/23 18:24:46,0000,0001,0025
@return: regex string
"""
pattern = r'''
(?x) # verbose
HEAT,
(?P<date_time> %(date_time)s ),
(?P<x_tilt> %(int)s ),
(?P<y_tilt> %(int)s ),
(?P<temp> %(int)s )
''' % common_regex_items
return pattern
def _encode_all(self):
return [
self._encode_value(HeatSampleParticleKey.SENSOR_ID, 'HEAT', str),
self._encode_value(HeatSampleParticleKey.TIME, self.match.group('date_time'), str),
self._encode_value(HeatSampleParticleKey.X_TILT, self.match.group('x_tilt'), int),
self._encode_value(HeatSampleParticleKey.Y_TILT, self.match.group('y_tilt'), int),
self._encode_value(HeatSampleParticleKey.TEMP, self.match.group('temp'), int)
]
class LilySampleParticle(BotptDataParticle):
_data_particle_type = DataParticleType.LILY_SAMPLE
def __init__(self, *args, **kwargs):
super(LilySampleParticle, self).__init__(*args, **kwargs)
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
LILY,2013/06/24 23:22:00,-236.026, 25.666,194.25, 26.01,11.96,N9655
@return: regex string
"""
pattern = r'''
(?x) # verbose
LILY,
(?P<date_time> %(date_time)s ),
(?P<x_tilt> %(float)s ),
(?P<y_tilt> %(float)s ),
(?P<compass> %(float)s ),
(?P<temp> %(float)s ),
(?P<volts> %(float)s ),
(?P<serial> %(word)s )
''' % common_regex_items
return pattern
def _encode_all(self):
return [
self._encode_value(LilySampleParticleKey.SENSOR_ID, 'LILY', str),
self._encode_value(LilySampleParticleKey.TIME, self.match.group('date_time'), str),
self._encode_value(LilySampleParticleKey.X_TILT, self.match.group('x_tilt'), float),
self._encode_value(LilySampleParticleKey.Y_TILT, self.match.group('y_tilt'), float),
self._encode_value(LilySampleParticleKey.MAG_COMPASS, self.match.group('compass'), float),
self._encode_value(LilySampleParticleKey.TEMP, self.match.group('temp'), float),
self._encode_value(LilySampleParticleKey.SUPPLY_VOLTS, self.match.group('volts'), float),
self._encode_value(LilySampleParticleKey.SN, self.match.group('serial').strip(), str),
]
class NanoSampleParticle(BotptDataParticle):
_data_particle_type = DataParticleType.NANO_SAMPLE
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
NANO,V,2013/08/22 22:48:36.013,13.888533,26.147947328
@return: regex string
"""
pattern = r'''
(?x)
NANO,
(?P<pps_sync> V|P ),
(?P<date_time> %(date_time)s ),
(?P<pressure> %(float)s ), # PSI
(?P<temp> %(float)s ) # deg C
%(newline)s
''' % common_regex_items
return pattern
def _encode_all(self):
return [
self._encode_value(NanoSampleParticleKey.SENSOR_ID, 'NANO', str),
self._encode_value(NanoSampleParticleKey.TIME, self.match.group('date_time'), str),
self._encode_value(NanoSampleParticleKey.PRESSURE, self.match.group('pressure'), float),
self._encode_value(NanoSampleParticleKey.TEMP, self.match.group('temp'), float),
self._encode_value(NanoSampleParticleKey.PPS_SYNC, self.match.group('pps_sync'), str),
]
###############################################################################
# Leveling Particles
###############################################################################
class LilyLevelingParticle(BotptDataParticle):
_data_particle_type = DataParticleType.LILY_LEVELING
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
Sample Data:
LILY,2013/07/24 19:37:12,* -7.625, 108.257,185.26, 28.14,11.87,N9651
LILY,2013/06/28 18:04:41,* -7.390, -14.063,190.91, 25.83,,Switching to Y!11.87,N9651
LILY,2013/06/28 17:29:21,* -2.277, -2.165,190.81, 25.69,,Leveled!11.87,N9651
LILY,2013/07/02 23:41:27,* -5.296, -2.640,185.18, 28.44,,Leveled!11.87,N9651
LILY,2013/03/22 19:07:28,*-330.000,-330.000,185.45, -6.45,,X Axis out of range, switching to Y!11.37,N9651
LILY,2013/03/22 19:07:29,*-330.000,-330.000,184.63, -6.43,,Y Axis out of range!11.34,N9651
"""
pattern = r'''
(?x) # verbose
LILY,
(?P<date_time> %(date_time)s ),
\* # leveling marker
(?P<x_tilt> %(float)s ),
(?P<y_tilt> %(float)s ),
(?P<compass> %(float)s ),
(?P<temp> %(float)s ),
(?P<volts> %(float)s|,\D*%(float)s ), # leveling status stuffed here, mangled
(?P<serial> %(word)s )
''' % common_regex_items
return pattern
def _encode_all(self):
# handle the mangled leveling status...
status = None
supply_volts = self.match.group('volts')
if supply_volts.startswith(','):
status, supply_volts = supply_volts.split('!')
status = status[1:]
return [
self._encode_value(LilyLevelingParticleKey.SENSOR_ID, 'LILY', str),
self._encode_value(LilyLevelingParticleKey.TIME, self.match.group('date_time'), str),
self._encode_value(LilyLevelingParticleKey.X_TILT, self.match.group('x_tilt'), float),
self._encode_value(LilyLevelingParticleKey.Y_TILT, self.match.group('y_tilt'), float),
self._encode_value(LilyLevelingParticleKey.MAG_COMPASS, self.match.group('compass'), float),
self._encode_value(LilyLevelingParticleKey.TEMP, self.match.group('temp'), float),
self._encode_value(LilyLevelingParticleKey.SUPPLY_VOLTS, supply_volts, float),
self._encode_value(LilyLevelingParticleKey.STATUS, status, str),
self._encode_value(LilyLevelingParticleKey.SN, self.match.group('serial').strip(), str)
]
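def _example_leveling_volts(volts_group=',Leveled!11.87'):
    """
    Illustrative sketch tracing how _encode_all above untangles the mangled
    volts field, e.g. from '..., 25.69,,Leveled!11.87,N9651' in the docstring.
    """
    status = None
    if volts_group.startswith(','):
        status, volts_group = volts_group.split('!')  # ',Leveled' / '11.87'
        status = status[1:]                           # drop the leading comma
    return status, float(volts_group)                 # ('Leveled', 11.87)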
###############################################################################
# Status Particle
###############################################################################
class BotptStatusParticle(BotptDataParticle):
_data_particle_type = DataParticleType.BOTPT_STATUS
_compile_flags = re.DOTALL
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
"""
return r'''
(?x)
(SYST,)
(?P<date_time> %(date_time)s)(,)
(?P<syst_status> \*BOTPT.*?root/bin)\n
(?P<lily_status1> LILY,%(date_time)s,\*APPLIED.*?,\*9900XY-DUMP-SETTINGS)\n
(?P<lily_status2> LILY,%(date_time)s,\*01:\ TBias.*?,\*9900XY-DUMP2)\n
(?P<iris_status1> IRIS,%(date_time)s,\*APPLIED.*?,\*9900XY-DUMP-SETTINGS)\n
(?P<iris_status2> IRIS,%(date_time)s,\*01:\ TBias.*?,\*9900XY-DUMP2)\n
(?P<nano_status> NANO,\*_____.*?ZV:\S+)
''' % common_regex_items
def _to_dict(self, sample):
result = {}
for each in sample:
result[each[DataParticleKey.VALUE_ID]] = each[DataParticleKey.VALUE]
return result
def _encode_all(self):
syst_status = 'SYST,%s,%s' % (self.match.group('date_time'), self.match.group('syst_status'))
return [
self._encode_value(BotptStatusParticleKey.IRIS1, self.match.group('iris_status1'), self._filter('IRIS')),
self._encode_value(BotptStatusParticleKey.IRIS2, self.match.group('iris_status2'), self._filter('IRIS')),
self._encode_value(BotptStatusParticleKey.LILY1, self.match.group('lily_status1'), self._filter('LILY')),
self._encode_value(BotptStatusParticleKey.LILY2, self.match.group('lily_status2'), self._filter('LILY')),
self._encode_value(BotptStatusParticleKey.NANO, self.match.group('nano_status'), self._filter('NANO')),
self._encode_value(BotptStatusParticleKey.SYST, syst_status, self._filter('SYST')),
]
###############################################################################
# Individual Status Particles
# These exist only to contain the regular expressions for filtering in the driver
###############################################################################
class IrisStatusParticle1(BotptDataParticle):
_compile_flags = re.DOTALL
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
Sample Data:
IRIS,2013/06/19 21:13:00,*APPLIED GEOMECHANICS Model MD900-T Firmware V5.2 SN-N3616 ID01
IRIS,2013/06/12 18:03:44,*01: Vbias= 0.0000 0.0000 0.0000 0.0000
IRIS,2013/06/12 18:03:44,*01: Vgain= 0.0000 0.0000 0.0000 0.0000
IRIS,2013/06/12 18:03:44,*01: Vmin: -2.50 -2.50 2.50 2.50
IRIS,2013/06/12 18:03:44,*01: Vmax: 2.50 2.50 2.50 2.50
IRIS,2013/06/12 18:03:44,*01: a0= 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
IRIS,2013/06/12 18:03:44,*01: a1= 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
IRIS,2013/06/12 18:03:44,*01: a2= 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
IRIS,2013/06/12 18:03:44,*01: a3= 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
IRIS,2013/06/12 18:03:44,*01: Tcoef 0: Ks= 0 Kz= 0 Tcal= 0
HEAT,2013/06/12 18:04:02,-001,0001,0024
IRIS,2013/06/12 18:03:44,*01: Tcoef 1: Ks= 0 Kz= 0 Tcal= 0
IRIS,2013/06/12 18:03:44,*01: N_SAMP= 460 Xzero= 0.00 Yzero= 0.00
IRIS,2013/06/12 18:03:44,*01: TR-PASH-OFF E99-ON SO-NMEA-SIM XY-EP 9600 baud FV-
IRIS,2013/06/12 18:03:44,*01:*9900XY-DUMP-SETTINGS
"""
return r'''
(?x) # verbose
(?P<name> IRIS)(,)
(?P<date_time> %(date_time)s)(,)
(?P<status> \*APPLIED.*)
(IRIS,%(date_time)s,\*9900XY-DUMP-SETTINGS)
''' % common_regex_items
class IrisStatusParticle2(BotptDataParticle):
_compile_flags = re.DOTALL
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
Sample Data:
IRIS,2013/06/12 23:55:09,*01: TBias: 8.85
IRIS,2013/06/12 23:55:09,*Above 0.00(KZMinTemp): kz[0]= 0, kz[1]= 0
IRIS,2013/06/12 23:55:09,*Below 0.00(KZMinTemp): kz[2]= 0, kz[3]= 0
IRIS,2013/06/12 18:04:01,*01: ADCDelay: 310
IRIS,2013/06/12 18:04:01,*01: PCA Model: 90009-01
IRIS,2013/06/12 18:04:01,*01: Firmware Version: 5.2 Rev N
LILY,2013/06/12 18:04:01,-330.000,-247.647,290.73, 24.50,11.88,N9656
IRIS,2013/06/12 18:04:01,*01: X Ch Gain= 1.0000, Y Ch Gain= 1.0000, Temperature Gain= 1.0000
IRIS,2013/06/12 18:04:01,*01: Output Mode: Degrees
IRIS,2013/06/12 18:04:01,*01: Calibration performed in Degrees
IRIS,2013/06/12 18:04:01,*01: Control: Off
IRIS,2013/06/12 18:04:01,*01: Using RS232
IRIS,2013/06/12 18:04:01,*01: Real Time Clock: Not Installed
IRIS,2013/06/12 18:04:01,*01: Use RTC for Timing: No
IRIS,2013/06/12 18:04:01,*01: External Flash Capacity: 0 Bytes(Not Installed)
IRIS,2013/06/12 18:04:01,*01: Relay Thresholds:
IRIS,2013/06/12 18:04:01,*01: Xpositive= 1.0000 Xnegative=-1.0000
IRIS,2013/06/12 18:04:01,*01: Ypositive= 1.0000 Ynegative=-1.0000
IRIS,2013/06/12 18:04:01,*01: Relay Hysteresis:
IRIS,2013/06/12 18:04:01,*01: Hysteresis= 0.0000
IRIS,2013/06/12 18:04:01,*01: Calibration method: Dynamic
IRIS,2013/06/12 18:04:01,*01: Positive Limit=26.25 Negative Limit=-26.25
IRIS,2013/06/12 18:04:02,*01: Calibration Points:025 X: Disabled Y: Disabled
IRIS,2013/06/12 18:04:02,*01: Biaxial Sensor Type (0)
IRIS,2013/06/12 18:04:02,*01: ADC: 12-bit (internal)
IRIS,2013/06/12 18:04:02,*01: DAC Output Scale Factor: 0.10 Volts/Degree
HEAT,2013/06/12 18:04:02,-001,0001,0024
IRIS,2013/06/12 18:04:02,*01: Total Sample Storage Capacity: 372
IRIS,2013/06/12 18:04:02,*01: BAE Scale Factor: 2.88388 (arcseconds/bit)
"""
return r'''
(?x) # verbose
(?P<name> IRIS)(,)
(?P<date_time> %(date_time)s)(,)
(?P<status> \*01:\ TBias.*?)
(IRIS,%(date_time)s,\*9900XY-DUMP2)
''' % common_regex_items
class LilyStatusParticle1(BotptDataParticle):
_compile_flags = re.DOTALL
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
Sample Data:
LILY,2014/06/09 18:13:50,*APPLIED GEOMECHANICS LILY Firmware V2.1 SN-N9651 ID01
LILY,2014/06/09 18:13:50,*01: Vbias= 0.0000 0.0000 0.0000 0.0000
LILY,2014/06/09 18:13:50,*01: Vgain= 0.0000 0.0000 0.0000 0.0000
LILY,2014/06/09 18:13:50,*01: Vmin: -2.50 -2.50 2.50 2.50
LILY,2014/06/09 18:13:50,*01: Vmax: 2.50 2.50 2.50 2.50
LILY,2014/06/09 18:13:50,*01: a0= 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
LILY,2014/06/09 18:13:50,*01: a1= 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
LILY,2014/06/09 18:13:50,*01: a2= 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
LILY,2014/06/09 18:13:50,*01: a3= 0.00000 0.00000 0.00000 0.00000 0.00000 0.00000
LILY,2014/06/09 18:13:50,*01: Tcoef 0: Ks= 0 Kz= 0 Tcal= 0
LILY,2014/06/09 18:13:51,*01: Tcoef 1: Ks= 0 Kz= 0 Tcal= 0
LILY,2014/06/09 18:13:51,*01: N_SAMP= 28 Xzero= 0.00 Yzero= 0.00
LILY,2014/06/09 18:13:51,*01: TR-PASH-OFF E99-ON SO-NMEA-SIM XY-EP 19200 baud FV-
LILY,2014/06/09 18:13:51,*9900XY-DUMP-SETTINGS
"""
return r'''
(?x) # verbose
(?P<name> LILY)(,)
(?P<date_time> %(date_time)s)(,)
(?P<status> \*APPLIED.*?)
(LILY,%(date_time)s,\*9900XY-DUMP-SETTINGS)
''' % common_regex_items
class LilyStatusParticle2(BotptDataParticle):
_compile_flags = re.DOTALL
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
Sample Data:
LILY,2014/06/09 18:04:32,*01: TBias: 3.00
LILY,2014/06/09 18:04:32,*01: Above 0.00(KZMinTemp): kz[0]= 0, kz[1]= 0
LILY,2014/06/09 18:04:32,*01: Below 0.00(KZMinTemp): kz[2]= 0, kz[3]= 0
LILY,2014/06/09 18:04:32,*01: ADCDelay: 310
LILY,2014/06/09 18:04:32,*01: PCA Model: 84833-14
LILY,2014/06/09 18:04:32,*01: Firmware Version: 2.1 Rev D
LILY,2014/06/09 18:04:32,*01: X Ch Gain= 1.0000, Y Ch Gain= 1.0000, Temperature Gain= 1.0000
LILY,2014/06/09 18:04:32,*01: Calibrated in uRadian, Current Output Mode: uRadian
LILY,2014/06/09 18:04:32,*01: Using RS232
LILY,2014/06/09 18:04:32,*01: Real Time Clock: Installed
LILY,2014/06/09 18:04:32,*01: Use RTC for Timing: Yes
LILY,2014/06/09 18:04:32,*01: External Flash: 2162688 Bytes Installed
LILY,2014/06/09 18:04:32,*01: Flash Status (in Samples) (Used/Total): (107/55424)
LILY,2014/06/09 18:04:32,*01: Low Power Logger Data Rate: -1 Seconds per Sample
LILY,2014/06/09 18:04:32,*01: Calibration method: Dynamic
LILY,2014/06/09 18:04:32,*01: Positive Limit=330.00 Negative Limit=-330.00
LILY,2014/06/09 18:04:32,*01: Calibration Points:023 X: Enabled Y: Enabled
LILY,2014/06/09 18:04:32,*01: Uniaxial (x2) Sensor Type (1)
LILY,2014/06/09 18:04:32,*01: ADC: 16-bit(external)
LILY,2014/06/09 18:04:32,*01: Compass: Installed Magnetic Declination: 0.000000
LILY,2014/06/09 18:04:32,*01: Compass: Xoffset: 124, Yoffset: 196, Xrange: 1349, Yrange: 1364
LILY,2014/06/09 18:04:32,*01: PID Coeff: iMax:100.0, iMin:-100.0, iGain:0.0150, pGain: 2.50, dGain: 10.0
LILY,2014/06/09 18:04:32,*01: Motor I_limit: 90.0mA
LILY,2014/06/09 18:04:33,*01: Current Time: 12/12/00 00:32:30
LILY,2014/06/09 18:04:33,*01: Supply Voltage: 11.87 Volts
LILY,2014/06/09 18:04:33,*01: Memory Save Mode: Off
LILY,2014/06/09 18:04:33,*01: Outputting Data: No
LILY,2014/06/09 18:04:33,*01: Auto Power-Off Recovery Mode: On
LILY,2014/06/09 18:04:33,*01: Advanced Memory Mode: Off, Delete with XY-MEMD: No
LILY,2014/06/09 18:04:33,*9900XY-DUMP2
"""
return r'''
(?x) # verbose
(?P<name> LILY)(,)
(?P<date_time> %(date_time)s)(,)
(?P<status> \*01:\ TBias.*?)(\n)
(LILY,%(date_time)s,\*9900XY-DUMP2)
''' % common_regex_items
class NanoStatusParticle(BotptDataParticle):
_compile_flags = re.DOTALL
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
Sample Data:
NANO,*______________________________________________________________
NANO,*PAROSCIENTIFIC SMT SYSTEM INFORMATION
NANO,*Model Number: 42.4K-265
NANO,*Serial Number: 120785
NANO,*Firmware Revision: R5.20
NANO,*Firmware Release Date: 03-25-13
NANO,*PPS status: V : PPS signal NOT detected.
NANO,*--------------------------------------------------------------
NANO,*AA:7.161800 AC:7.290000 AH:160.0000 AM:0
NANO,*AP:0 AR:160.0000 BL:0 BR1:115200
NANO,*BR2:115200 BV:10.9 BX:112 C1:-9747.897
NANO,*C2:288.5739 C3:27200.78 CF:BA0F CM:4
NANO,*CS:7412 D1:.0572567 D2:.0000000 DH:2000.000
NANO,*DL:0 DM:0 DO:0 DP:6
NANO,*DZ:.0000000 EM:0 ET:0 FD:.153479
NANO,*FM:0 GD:0 GE:2 GF:0
NANO,*GP:: GT:1 IA1:8 IA2:12
NANO,*IB:0 ID:1 IE:0 IK:46
NANO,*IM:0 IS:5 IY:0 KH:0
NANO,*LH:2250.000 LL:.0000000 M1:13.880032 M3:14.090198
NANO,*MA: MD:0 MU: MX:0
NANO,*NO:0 OI:0 OP:2100.000 OR:1.00
NANO,*OY:1.000000 OZ:0 PA:.0000000 PC:.0000000
NANO,*PF:2000.000 PI:25 PL:2400.000 PM:1.000000
NANO,*PO:0 PR:238 PS:0 PT:N
NANO,*PX:3 RE:0 RS:5 RU:0
NANO,*SD:12 SE:0 SI:OFF SK:0
NANO,*SL:0 SM:OFF SP:0 ST:10
NANO,*SU:0 T1:30.00412 T2:1.251426 T3:50.64434
NANO,*T4:134.5816 T5:.0000000 TC:.6781681 TF:.00
NANO,*TH:1,P4;>OK TI:25 TJ:2 TP:0
NANO,*TQ:1 TR:952 TS:1 TU:0
NANO,*U0:5.839037 UE:0 UF:1.000000
NANO,*UL: UM:user UN:1
NANO,*US:0 VP:4 WI:Def=15:00-061311
NANO,*XC:8 XD:A XM:1 XN:0
NANO,*XS:0011 XX:1 Y1:-3818.141 Y2:-10271.53
NANO,*Y3:.0000000 ZE:0 ZI:0 ZL:0
NANO,*ZM:0 ZS:0 ZV:.0000000
"""
return r'''
(?x) # verbose
(?P<name> NANO)(,)
(?P<status> \*_____.*?ZV:\S+)
''' % common_regex_items
def set_botpt_timestamp(self):
"""
Overridden, no timestamp in this status
"""
self.contents[DataParticleKey.INTERNAL_TIMESTAMP] = self.contents[DataParticleKey.PORT_TIMESTAMP]
class SystStatusParticle(BotptDataParticle):
_compile_flags = re.DOTALL
@staticmethod
def regex():
"""
Regular expression to match a sample pattern
@return: regex string
Sample Data:
SYST,2014/04/07 20:46:35,*BOTPT BPR and tilt instrument controller
SYST,2014/04/07 20:46:35,*ts7550n3
SYST,2014/04/07 20:46:35,*System uptime
SYST,2014/04/07 20:46:35,* 20:17:02 up 13 days, 19:11, 0 users, load average: 0.00, 0.00, 0.00
SYST,2014/04/07 20:46:35,*Memory stats
SYST,2014/04/07 20:46:35,* total used free shared buffers cached
SYST,2014/04/07 20:46:35,*Mem: 62888 18520 44368 0 2260 5120
SYST,2014/04/07 20:46:35,*-/+ buffers/cache: 11140 51748
SYST,2014/04/07 20:46:35,*Swap: 0 0 0
SYST,2014/04/07 20:46:35,*MemTotal: 62888 kB
SYST,2014/04/07 20:46:35,*MemFree: 44392 kB
SYST,2014/04/07 20:46:35,*Buffers: 2260 kB
SYST,2014/04/07 20:46:35,*Cached: 5120 kB
SYST,2014/04/07 20:46:35,*SwapCached: 0 kB
SYST,2014/04/07 20:46:35,*Active: 10032 kB
SYST,2014/04/07 20:46:35,*Inactive: 3328 kB
SYST,2014/04/07 20:46:35,*SwapTotal: 0 kB
SYST,2014/04/07 20:46:35,*SwapFree: 0 kB
SYST,2014/04/07 20:46:35,*Dirty: 0 kB
SYST,2014/04/07 20:46:35,*Writeback: 0 kB
SYST,2014/04/07 20:46:35,*AnonPages: 6000 kB
SYST,2014/04/07 20:46:35,*Mapped: 3976 kB
SYST,2014/04/07 20:46:35,*Slab: 3128 kB
SYST,2014/04/07 20:46:35,*SReclaimable: 800 kB
SYST,2014/04/07 20:46:35,*SUnreclaim: 2328 kB
SYST,2014/04/07 20:46:35,*PageTables: 512 kB
SYST,2014/04/07 20:46:35,*NFS_Unstable: 0 kB
SYST,2014/04/07 20:46:35,*Bounce: 0 kB
SYST,2014/04/07 20:46:35,*CommitLimit: 31444 kB
SYST,2014/04/07 20:46:35,*Committed_AS: 167276 kB
SYST,2014/04/07 20:46:35,*VmallocTotal: 188416 kB
SYST,2014/04/07 20:46:35,*VmallocUsed: 0 kB
SYST,2014/04/07 20:46:35,*VmallocChunk: 188416 kB
SYST,2014/04/07 20:46:35,*Listening network services
SYST,2014/04/07 20:46:35,*tcp 0 0 *:9337-commands *:* LISTEN
SYST,2014/04/07 20:46:35,*tcp 0 0 *:9338-data *:* LISTEN
SYST,2014/04/07 20:46:35,*udp 0 0 *:323 *:*
SYST,2014/04/07 20:46:35,*udp 0 0 *:54361 *:*
SYST,2014/04/07 20:46:35,*udp 0 0 *:mdns *:*
SYST,2014/04/07 20:46:35,*udp 0 0 *:ntp *:*
SYST,2014/04/07 20:46:35,*Data processes
SYST,2014/04/07 20:46:35,*root 643 0.0 2.2 20100 1436 ? Sl Mar25 0:01 /root/bin/COMMANDER
SYST,2014/04/07 20:46:35,*root 647 0.0 2.5 21124 1604 ? Sl Mar25 0:16 /root/bin/SEND_DATA
SYST,2014/04/07 20:46:35,*root 650 0.0 2.2 19960 1388 ? Sl Mar25 0:00 /root/bin/DIO_Rel1
SYST,2014/04/07 20:46:35,*root 654 0.0 2.1 19960 1360 ? Sl Mar25 0:02 /root/bin/HEAT
SYST,2014/04/07 20:46:35,*root 667 0.0 2.2 19960 1396 ? Sl Mar25 0:00 /root/bin/IRIS
SYST,2014/04/07 20:46:35,*root 672 0.0 2.2 19960 1396 ? Sl Mar25 0:01 /root/bin/LILY
SYST,2014/04/07 20:46:35,*root 678 0.0 2.2 19964 1400 ? Sl Mar25 0:12 /root/bin/NANO
SYST,2014/04/07 20:46:35,*root 685 0.0 2.2 19960 1396 ? Sl Mar25 0:00 /root/bin/RESO
SYST,2014/04/07 20:46:35,*root 7860 0.0 0.9 1704 604 ? S 20:17 0:00 grep root/bin
"""
return r'''
(?x) # verbose
(?P<name> SYST)(,)
(?P<date_time> %(date_time)s)(,)
(?P<status> \*BOTPT.*?root/bin)
''' % common_regex_items
| |
"""Support for Sensibo wifi-enabled home thermostats."""
from __future__ import annotations
import asyncio
from aiohttp.client_exceptions import ClientConnectionError
import async_timeout
from pysensibo.exceptions import AuthenticationError, SensiboError
import voluptuous as vol
from homeassistant.components.climate import (
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
ClimateEntity,
)
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_STATE,
ATTR_TEMPERATURE,
CONF_API_KEY,
CONF_ID,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util.temperature import convert as convert_temperature
from .const import ALL, DOMAIN, LOGGER, TIMEOUT
from .coordinator import SensiboDataUpdateCoordinator
SERVICE_ASSUME_STATE = "assume_state"
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_ID, default=ALL): vol.All(cv.ensure_list, [cv.string]),
}
)
FIELD_TO_FLAG = {
"fanLevel": SUPPORT_FAN_MODE,
"swing": SUPPORT_SWING_MODE,
"targetTemperature": SUPPORT_TARGET_TEMPERATURE,
}
SENSIBO_TO_HA = {
"cool": HVAC_MODE_COOL,
"heat": HVAC_MODE_HEAT,
"fan": HVAC_MODE_FAN_ONLY,
"auto": HVAC_MODE_HEAT_COOL,
"dry": HVAC_MODE_DRY,
"off": HVAC_MODE_OFF,
}
HA_TO_SENSIBO = {value: key for key, value in SENSIBO_TO_HA.items()}
AC_STATE_TO_DATA = {
"targetTemperature": "target_temp",
"fanLevel": "fan_mode",
"on": "on",
"mode": "hvac_mode",
"swing": "swing_mode",
}
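# Illustrative note (hedged sketch, device payload assumed): get_features() on
# SensiboClimate below folds the device's "full_features" list into a Home
# Assistant support bitmask using FIELD_TO_FLAG, e.g.
#
#     features = 0
#     for key in ("fanLevel", "targetTemperature"):  # from coordinator data
#         features |= FIELD_TO_FLAG.get(key, 0)
#     # -> SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE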
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up Sensibo devices."""
LOGGER.warning(
"Loading Sensibo via platform setup is deprecated; Please remove it from your configuration"
)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config,
)
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Sensibo climate entry."""
coordinator: SensiboDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
entities = [
SensiboClimate(coordinator, device_id)
for device_id, device_data in coordinator.data.items()
# Skip non-climate devices (no HVAC modes or temperature reading)
if device_data["hvac_modes"] and device_data["temp"]
]
async_add_entities(entities)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_ASSUME_STATE,
{
vol.Required(ATTR_STATE): vol.In(["on", "off"]),
},
"async_assume_state",
)
class SensiboClimate(CoordinatorEntity, ClimateEntity):
"""Representation of a Sensibo device."""
coordinator: SensiboDataUpdateCoordinator
def __init__(
self, coordinator: SensiboDataUpdateCoordinator, device_id: str
) -> None:
"""Initiate SensiboClimate."""
super().__init__(coordinator)
self._client = coordinator.client
self._attr_unique_id = device_id
self._attr_name = coordinator.data[device_id]["name"]
self._attr_temperature_unit = (
TEMP_CELSIUS
if coordinator.data[device_id]["temp_unit"] == "C"
else TEMP_FAHRENHEIT
)
self._attr_supported_features = self.get_features()
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, coordinator.data[device_id]["id"])},
name=coordinator.data[device_id]["name"],
connections={(CONNECTION_NETWORK_MAC, coordinator.data[device_id]["mac"])},
manufacturer="Sensibo",
configuration_url="https://home.sensibo.com/",
model=coordinator.data[device_id]["model"],
sw_version=coordinator.data[device_id]["fw_ver"],
hw_version=coordinator.data[device_id]["fw_type"],
suggested_area=coordinator.data[device_id]["name"],
)
def get_features(self) -> int:
"""Get supported features."""
features = 0
for key in self.coordinator.data[self.unique_id]["full_features"]:
if key in FIELD_TO_FLAG:
features |= FIELD_TO_FLAG[key]
return features
@property
def current_humidity(self) -> int:
"""Return the current humidity."""
return self.coordinator.data[self.unique_id]["humidity"]
@property
def hvac_mode(self) -> str:
"""Return hvac operation."""
return (
SENSIBO_TO_HA[self.coordinator.data[self.unique_id]["hvac_mode"]]
if self.coordinator.data[self.unique_id]["on"]
else HVAC_MODE_OFF
)
@property
def hvac_modes(self) -> list[str]:
"""Return the list of available hvac operation modes."""
return [
SENSIBO_TO_HA[mode]
for mode in self.coordinator.data[self.unique_id]["hvac_modes"]
]
@property
def current_temperature(self) -> float:
"""Return the current temperature."""
return convert_temperature(
self.coordinator.data[self.unique_id]["temp"],
TEMP_CELSIUS,
self.temperature_unit,
)
@property
def target_temperature(self) -> float | None:
"""Return the temperature we try to reach."""
return self.coordinator.data[self.unique_id]["target_temp"]
@property
def target_temperature_step(self) -> float | None:
"""Return the supported step of target temperature."""
return self.coordinator.data[self.unique_id]["temp_step"]
@property
def fan_mode(self) -> str | None:
"""Return the fan setting."""
return self.coordinator.data[self.unique_id]["fan_mode"]
@property
def fan_modes(self) -> list[str] | None:
"""Return the list of available fan modes."""
return self.coordinator.data[self.unique_id]["fan_modes"]
@property
def swing_mode(self) -> str | None:
"""Return the swing setting."""
return self.coordinator.data[self.unique_id]["swing_mode"]
@property
def swing_modes(self) -> list[str] | None:
"""Return the list of available swing modes."""
return self.coordinator.data[self.unique_id]["swing_modes"]
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self.coordinator.data[self.unique_id]["temp_list"][0]
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self.coordinator.data[self.unique_id]["temp_list"][-1]
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self.coordinator.data[self.unique_id]["available"] and super().available
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
if (
"targetTemperature"
not in self.coordinator.data[self.unique_id]["active_features"]
):
raise HomeAssistantError(
"Current mode doesn't support setting Target Temperature"
)
if (temperature := kwargs.get(ATTR_TEMPERATURE)) is None:
return
if temperature == self.target_temperature:
return
if temperature not in self.coordinator.data[self.unique_id]["temp_list"]:
# Requested temperature is not supported.
if temperature > self.coordinator.data[self.unique_id]["temp_list"][-1]:
temperature = self.coordinator.data[self.unique_id]["temp_list"][-1]
elif temperature < self.coordinator.data[self.unique_id]["temp_list"][0]:
temperature = self.coordinator.data[self.unique_id]["temp_list"][0]
else:
return
await self._async_set_ac_state_property("targetTemperature", int(temperature))
async def async_set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
if "fanLevel" not in self.coordinator.data[self.unique_id]["active_features"]:
raise HomeAssistantError("Current mode doesn't support setting Fanlevel")
await self._async_set_ac_state_property("fanLevel", fan_mode)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target operation mode."""
if hvac_mode == HVAC_MODE_OFF:
await self._async_set_ac_state_property("on", False)
return
# Turn on if not currently on.
if not self.coordinator.data[self.unique_id]["on"]:
await self._async_set_ac_state_property("on", True)
await self._async_set_ac_state_property("mode", HA_TO_SENSIBO[hvac_mode])
async def async_set_swing_mode(self, swing_mode: str) -> None:
"""Set new target swing operation."""
if "swing" not in self.coordinator.data[self.unique_id]["active_features"]:
raise HomeAssistantError("Current mode doesn't support setting Swing")
await self._async_set_ac_state_property("swing", swing_mode)
async def async_turn_on(self) -> None:
"""Turn Sensibo unit on."""
await self._async_set_ac_state_property("on", True)
async def async_turn_off(self) -> None:
"""Turn Sensibo unit on."""
await self._async_set_ac_state_property("on", False)
async def _async_set_ac_state_property(
self, name: str, value: str | int | bool, assumed_state: bool = False
) -> None:
"""Set AC state."""
result = {}
try:
async with async_timeout.timeout(TIMEOUT):
result = await self._client.async_set_ac_state_property(
self.unique_id,
name,
value,
self.coordinator.data[self.unique_id]["ac_states"],
assumed_state,
)
except (
ClientConnectionError,
asyncio.TimeoutError,
AuthenticationError,
SensiboError,
) as err:
raise HomeAssistantError(
f"Failed to set AC state for device {self.name} to Sensibo servers: {err}"
) from err
LOGGER.debug("Result: %s", result)
if result["result"]["status"] == "Success":
self.coordinator.data[self.unique_id][AC_STATE_TO_DATA[name]] = value
self.async_write_ha_state()
return
failure = result["result"]["failureReason"]
raise HomeAssistantError(
f"Could not set state for device {self.name} due to reason {failure}"
)
async def async_assume_state(self, state) -> None:
"""Sync state with api."""
await self._async_set_ac_state_property("on", state != HVAC_MODE_OFF, True)
await self.coordinator.async_refresh()
| |
## Python control script for 4x4x4 LED Cube
## John Caswell, 2014
import serial
import time
import struct
import random
import threading
from bottle import route, get, post, request, run, redirect, static_file
## Configuration: set these to match your setup
interface = "0.0.0.0" # Interface to listen on, 0.0.0.0 is all interfaces
port = 8080 # Port to listen on
serialPort = "COM/dev/tty" # Serial port where LED cube is connected
baud = 9600 # Baud rate of cube, 9600 is default
timeout = 15 # How long, in seconds, to run animations in sequences
serverHome = "/path/to/dir/with/LEDCube.py" # Path to installation
## End config
## Globals
matrix = 0x0000000000000000
status = "stopped"
starting = True
modes = ["beamdown", "blink", "bounce", "crazy", "helix", "innerouter",\
"raindrops", "spin", "twinkle", "_sequence", "_random"]
animation = modes[0]
## Attempt to open connection to cube
try:
arduino = serial.Serial(serialPort, baud)
time.sleep(2)
except Exception:
print("Error opening serial port, quitting.")
quit()
## Animation functions. These should perform a single frame of animation
## and then return. They must be named the same as the strings in modes[].
## Recommended that a frame be no longer than 1 second, which feels like
## a reasonable delay with a web-based UI
## Animations with names starting with _ are special. They are playlists,
## which will run other animations. Provided are _sequence and _random, which
## play other animations in sequence, or randomly, respectively. They will skip
## any animation whose name also starts with _, and any new playlists should
## implement this behavior. By convention, place playlists at the end of the list.
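## A minimal hedged sketch of a new animation following the conventions above
## (illustrative only -- not wired into modes[]; assumes the 16-bits-per-plane
## frame layout used by the animations below):
def example_corners():
    ## Light the eight cube corners for one frame, then blank them
    if not hasattr(example_corners, "toggle"):
        example_corners.toggle = True
    frame = 0x9009000000009009 if example_corners.toggle else 0x0
    arduino.write(struct.pack("<Q", frame))
    example_corners.toggle = not example_corners.toggle
    time.sleep(1)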
def beamdown():
## Light each plane in order, faster and faster until we fill the cube
global starting
if not hasattr(beamdown, "plane"):
beamdown.plane = 0xffff
if not hasattr(beamdown, "sleeptime"):
beamdown.sleeptime = 1
if not hasattr(beamdown, "done"):
beamdown.done = False
if starting:
beamdown.plane = 0xffff
beamdown.sleeptime = 1
starting = False
beamdown.done = False
if beamdown.plane < 0x10000000000000000 and not beamdown.done:
arduino.write(struct.pack("Q", beamdown.plane))
if beamdown.sleeptime > 0.016:
time.sleep(beamdown.sleeptime)
beamdown.plane = beamdown.plane * 0x10000
else:
arduino.write(struct.pack("<Q", 0xffffffffffffffff))
beamdown.done = True
elif not beamdown.done:
beamdown.plane = 0xffff
beamdown.sleeptime = beamdown.sleeptime / 2.0
else:
time.sleep(0.001)
def blink():
if not hasattr(blink, "toggle"):
blink.toggle = True
if blink.toggle:
arduino.write(struct.pack("<Q", 0xffffffffffffffff))
blink.toggle = False
else:
arduino.write(struct.pack("<Q", 0x0000000000000000))
blink.toggle = True
time.sleep(1)
def bounce():
pass
def crazy():
## Just randomly set the states of LEDs
arduino.write(struct.pack("<Q", random.randint(0, 0xffffffffffffffff)))
time.sleep(0.35)
def helix():
## Rotating double helix
global starting
global matrix
if not hasattr(helix, "i"):
helix.i = 0
if not hasattr(helix, "mask"):
helix.mask = [0x8001, 0x810, 0x180, 0x1008, 0x2004, 0x4002]
if starting:
helix.i = 0
starting = False
arduino.write(struct.pack("<Q", matrix))
matrix = matrix >> 16
matrix |= (helix.mask[helix.i] << 48)
helix.i += 1
if helix.i > 5:
helix.i = 0
time.sleep(0.125)
def innerouter():
## Alternate inner and outer cube wireframe
if not hasattr(innerouter, "toggle"):
innerouter.toggle = True
if innerouter.toggle:
arduino.write(struct.pack("<Q", 0x0000066006600000))
innerouter.toggle = False
else:
arduino.write(struct.pack("<Q", 0xf99f90099009f99f))
innerouter.toggle = True
time.sleep(1)
def raindrops():
## Raindrops trickle from the top layer down
global matrix
drops = random.randint(0, 2)
for x in range(0, drops):
matrix += 1 << random.randint(0, 15)
arduino.write(struct.pack("<Q", matrix))
matrix *= 0x10000
matrix &= 0xffffffffffffffff
time.sleep(0.15)
def spin():
pass
def twinkle():
## Twinkle on and off randomly, switching when cube is full / empty
global starting
global matrix
if not hasattr(twinkle, "toggle"):
twinkle.toggle = True
if starting:
twinkle.toggle = True
starting = False
if twinkle.toggle:
if matrix < 0xffffffffffffffff:
twink = 1 << random.randint(0, 63)
if not twink & matrix:
matrix |= twink
arduino.write(struct.pack("<Q", matrix))
time.sleep(0.15)
else:
twinkle.toggle = False
time.sleep(0.5)
else:
if matrix > 0:
twink = 1 << random.randint(0, 63)
if matrix & twink:
matrix &= matrix ^ twink
arduino.write(struct.pack("Q", matrix))
time.sleep(0.15)
else:
twinkle.toggle = True
time.sleep(0.5)
def _sequence():
global starting
if not hasattr(_sequence, "_time"):
_sequence._time = time.time()
if not hasattr(_sequence, "i"):
_sequence.i = 0
if _sequence._time + timeout > time.time():
if modes[_sequence.i][0] == '_':
_sequence.i = _sequence.i + 1
if _sequence.i >= len(modes):
_sequence.i = 0
else:
globals()[modes[_sequence.i]]()
else:
_sequence._time = time.time()
_sequence.i = _sequence.i + 1
starting = True
if _sequence.i >= len(modes):
_sequence.i = 0
def _random():
if not hasattr(_random, "_time"):
_random._time = time.time()
if not hasattr(_random, "i"):
_random.i = random.randint(0, len(modes) - 1)
if _random._time + timeout > time.time():
if modes[_random.i][0] == '_':
_random.i = random.randint(0, len(modes) - 1)
else:
globals()[modes[_random.i]]()
else:
_random._time = time.time()
_random.i = random.randint(0, len(modes) - 1)
## Cube control thread, this handles interfacing with the cube
## It is controlled by the status, starting, animation, and matrix globals
## If status is running, it will call the named animation and yield
def cube():
while True:
if status == "running":
globals()[animation]() # Call the function named in animation
else:
time.sleep(0.001)
## Create the thread to control the cube and start it
t = threading.Thread(target = cube)
t.daemon = True
t.start()
## Handle main control buttons
@route('/start')
def start():
global status
status = "running"
redirect("/")
@route('/stop')
def stop():
global matrix
global status
global starting
starting = True
status = "stopped"
matrix = 0x0000000000000000
arduino.write(struct.pack("<Q", matrix))
redirect("/")
@route('/pause')
def pause():
global status
status = "paused"
redirect("/")
## Add path for images
@route('/images/<filename>')
def images(filename):
return static_file(filename, root=serverHome + "/images/")
## Define main page
@get('/')
def root():
header = '''<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>LED Cube Control</title>
<style type="text/css">body{color:#ffffff}</style>
</head>
<body style="background-color:#000000">
<div align="center">
'''
body = '''<img src="/images/pps.jpg" alt="buttons" usemap="#buttonmap">
<map name="buttonmap">
<area shape="circle" coords="108,112,34" href="/pause" alt="Pause">
<area shape="circle" coords="195,112,34" href="/start" alt="Play">
<area shape="circle" coords="277,112,34" href="/stop" alt="Stop">
</map>
<form action="" method="post">
<select name="animation">
'''
formlist= ""
for mode in modes:
formlist += "<option"
if mode == animation:
formlist += " selected=\"selected\""
formlist += ">" + mode + "</option>\n"
footer='''</select>
<input type="image" src="/images/go.png">
</form>
</div>
</body>
</html>
'''
response = header
response += "current animation: "
response += animation + " "
response += status + "<br>\n"
response += body
response += formlist
response += footer
return response
## Handle animation selection form
@post('/')
def form():
global animation
global matrix
global starting
starting = True
animation = request.forms.get('animation')
matrix = 0x0000000000000000
return start()
## Start HTTP Server
run(host=interface, port=port)
| |
# -*- coding: utf-8 -*-
"""
flaskext.mongoobject
~~~~~~~~~~~~~~~~~~~~
Add basic MongoDB support to your Flask application.
Inspiration:
https://github.com/slacy/minimongo/
https://github.com/mitsuhiko/flask-sqlalchemy
:copyright: (c) 2011 by Daniel, Dao Quang Minh (dqminh).
:license: MIT, see LICENSE for more details.
"""
from __future__ import absolute_import
from bson.dbref import DBRef
from bson.son import SON
from pymongo import Connection
from pymongo.collection import Collection
from pymongo.cursor import Cursor
from pymongo.son_manipulator import AutoReference, NamespaceInjector
from flask import abort
class AttrDict(dict):
"""
Base object that represents a MongoDB document. The object will behave both
like a dict `x['y']` and like an object `x.y`
"""
def __init__(self, initial=None, **kwargs):
# Make sure that during initialization, that we recursively apply
# AttrDict. Maybe this could be better done with the builtin
# defaultdict?
if initial:
for key, value in initial.iteritems():
# Can't just say self[k] = v here b/c of recursion.
self.__setitem__(key, value)
# Process the other arguments (assume they are also default values).
# This is the same behavior as the regular dict constructor.
for key, value in kwargs.iteritems():
self.__setitem__(key, value)
super(AttrDict, self).__init__()
# These lines make this object behave both like a dict (x['y']) and like
# an object (x.y). We have to translate KeyError into AttributeError:
# both model.undefined and model['undefined'] would otherwise raise a
# KeyError, but attribute access is expected to raise AttributeError,
# so we 'translate' below:
def __getattr__(self, attr):
try:
return super(AttrDict, self).__getitem__(attr)
except KeyError as excn:
raise AttributeError(excn)
def __setattr__(self, attr, value):
try:
# Okay to set directly here, because we're not recursing.
self[attr] = value
except KeyError as excn:
raise AttributeError(excn)
def __delattr__(self, key):
try:
return super(AttrDict, self).__delitem__(key)
except KeyError as excn:
raise AttributeError(excn)
def __setitem__(self, key, value):
new_value = value
# if the nested attribute is not an :class: `AttrDict` already,
# convert it to one
if isinstance(value, dict) and not isinstance(value, AttrDict):
new_value = AttrDict(value)
elif isinstance(value, list):
for i, item in enumerate(value):
if isinstance(item, dict) and not isinstance(item, AttrDict):
value[i] = AttrDict(item)
else:
value[i] = item
return super(AttrDict, self).__setitem__(key, new_value)
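def _example_attrdict():
    """
    Illustrative usage sketch: AttrDict supports dict-style and attribute-style
    access interchangeably, and converts nested dicts recursively.
    """
    doc = AttrDict({'author': {'name': 'Daniel'}})
    assert doc.author.name == doc['author']['name'] == 'Daniel'
    # doc.missing would raise AttributeError (translated from KeyError)
    return doc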
class MongoCursor(Cursor):
"""
A cursor that will return an instance of :attr:`as_class` instead of
`dict`
"""
def __init__(self, *args, **kwargs):
self.as_class = kwargs.pop('as_class')
super(MongoCursor, self).__init__(*args, **kwargs)
def next(self):
data = super(MongoCursor, self).next()
return self.as_class(data)
def __getitem__(self, index):
item = super(MongoCursor, self).__getitem__(index)
if isinstance(index, slice):
return item
else:
return self.as_class(item)
class AutoReferenceObject(AutoReference):
"""
Transparently reference and de-reference already saved embedded objects.
This manipulator should probably only be used when the NamespaceInjector is
also being used, otherwise it doesn't make too much sense - documents can
only be auto-referenced if they have an `_ns` field.
If the document should be an instance of a :class:`flaskext.mongoobject.Model`
then we will transform it into a model's instance too.
NOTE: this will behave poorly if you have a circular reference.
TODO: this only works for documents that are in the same database. To fix
this we'll need to add a DatabaseInjector that adds `_db` and then make
use of the optional `database` support for DBRefs.
"""
def __init__(self, mongo):
self.mongo = mongo
self.db = mongo.session
def transform_outgoing(self, son, collection):
def transform_value(value):
if isinstance(value, DBRef):
return transform_value(self.db.dereference(value))  # __init__ sets self.db; the name-mangled self.__database is undefined here
elif isinstance(value, list):
return [transform_value(v) for v in value]
elif isinstance(value, dict):
if value.get('_ns', None):
# if the collection has a :class:`Model` mapper
cls = self.mongo.mapper.get(value['_ns'], None)
if cls:
return cls(transform_dict(SON(value)))
return transform_dict(SON(value))
return value
def transform_dict(object):
for (key, value) in object.items():
object[key] = transform_value(value)
return object
value = transform_dict(SON(son))
return value
class BaseQuery(Collection):
"""
`BaseQuery` extends :class:`pymongo.Collection` that replaces all results
coming from database with instance of :class:`Model`
"""
def __init__(self, *args, **kwargs):
self.document_class = kwargs.pop('document_class')
super(BaseQuery, self).__init__(*args, **kwargs)
def find_one(self, *args, **kwargs):
kwargs['as_class'] = self.document_class
return super(BaseQuery, self).find_one(*args, **kwargs)
def find(self, *args, **kwargs):
kwargs['as_class'] = self.document_class
return MongoCursor(self, *args, **kwargs)
def find_and_modify(self, *args, **kwargs):
kwargs['as_class'] = self.document_class
return super(BaseQuery, self).find_and_modify(*args, **kwargs)
def get_or_404(self, id):
item = self.find_one(id)  # find_one already injects as_class
if not item:
abort(404)
return item
class _QueryProperty(object):
"""
Represent :attr:`Model.query` that dynamically instantiate
:attr:`Model.query_class` so that we can do things like
`Model.query.find_one`
"""
def __init__(self, mongo):
self.mongo = mongo
def __get__(self, instance, owner):
return owner.query_class(database=self.mongo.session,
name=owner.__collection__,
document_class=owner)
class Model(AttrDict):
"""
Base class for custom user models. Provide convenience ActiveRecord
methods such as :attr:`save`, :attr:`remove`
"""
#: Query class
query_class = BaseQuery
#: instance of :attr:`query_class`
query = None
#: name of this model collection
__collection__ = None
@property
def id(self):
if getattr(self, "_id", None):
return str(self._id)
def __init__(self, *args, **kwargs):
assert 'query_class' not in kwargs
assert 'query' not in kwargs
assert '__collection__' not in kwargs
super(Model, self).__init__(*args, **kwargs)
def save(self, *args, **kwargs):
self.query.save(self, *args, **kwargs)
return self
def update(self, *args, **kwargs):
self.query.update({"_id": self._id}, self, *args, **kwargs)
return self
def remove(self):
return self.query.remove(self._id)
def __str__(self):
return '%s(%s)' % (self.__class__.__name__,
super(Model, self).__str__())
def __unicode__(self):
return str(self).decode('utf-8')
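# Illustrative usage sketch (collection name assumed), mirroring the
# ActiveRecord style described above; requires a MongoObject to have been
# initialised so that Model.query is bound:
#
#     class Post(Model):
#         __collection__ = 'posts'
#
#     post = Post(title='hello').save()
#     Post.query.find_one({'title': 'hello'})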
class MongoObject(object):
def __init__(self, app=None):
if app is not None:
self.app = app
self.init_app(app)
self.Model = self.make_model()
self.mapper = {}
def init_app(self, app):
app.config.setdefault('MONGODB_HOST', "mongodb://localhost:27017")
app.config.setdefault('MONGODB_DATABASE', "")
app.config.setdefault('MONGODB_AUTOREF', True)
# initialize connection and Model properties
self.app = app
self.connect()
self.app.after_request(self.close_connection)
def connect(self):
self.connection = Connection(self.app.config['MONGODB_HOST'])
def init_connection(self):
self.connection = Connection(self.app.config['MONGODB_HOST'])
def make_model(self):
model = Model
model.query = _QueryProperty(self)
return model
@property
def session(self):
if not getattr(self, "db", None):
self.db = self.connection[self.app.config['MONGODB_DATABASE']]
if self.app.config['MONGODB_AUTOREF']:
self.db.add_son_manipulator(NamespaceInjector())
self.db.add_son_manipulator(AutoReferenceObject(self))
return self.db
def set_mapper(self, model):
# Set up mapper for model, so when we retrieve documents from database,
# we will know how to map them to model object based on `_ns` fields
self.mapper[model.__collection__] = model
def close_connection(self, response):
self.connection.end_request()
return response
def clear(self):
self.connection.drop_database(self.app.config['MONGODB_DATABASE'])
self.connection.end_request()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variables. See the @{$python/state_ops} guide."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import gen_state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_state_ops import *
from tensorflow.python.util.tf_export import tf_export
# pylint: enable=wildcard-import
# pylint: disable=protected-access,g-doc-return-or-yield,g-doc-args
def variable_op(shape, dtype, name="Variable", set_shape=True, container="",
shared_name=""):
"""Deprecated. Used variable_op_v2 instead."""
if not set_shape:
shape = tensor_shape.unknown_shape()
ret = gen_state_ops.variable(shape=shape, dtype=dtype, name=name,
container=container, shared_name=shared_name)
# TODO(mrry): Move this to where it is used, so we can get rid of this op
# wrapper?
if set_shape:
ret.set_shape(shape)
return ret
def variable_op_v2(shape, dtype, name="Variable", container="", shared_name=""):
"""Create a variable Operation.
See also variables.Variable.
Args:
shape: The shape of the tensor managed by this variable
dtype: The underlying type of the tensor values.
name: optional name to use for the variable op.
container: An optional string. Defaults to "".
If non-empty, this variable is placed in the given container.
Otherwise, a default container is used.
shared_name: An optional string. Defaults to "".
If non-empty, this variable is named in the given bucket
with this shared_name. Otherwise, the node name is used instead.
Returns:
A variable tensor.
"""
return gen_state_ops.variable_v2(
shape=shape,
dtype=dtype,
name=name,
container=container,
shared_name=shared_name)
def init_variable(v, init, name="init"):
"""Initializes variable with "init".
This op does the following:
if init is a Tensor, v = init
if callable(init): v = init(VariableShape(v), v.dtype)
Args:
v: Variable to initialize
init: Tensor to assign to v,
Or an object convertible to Tensor e.g. nparray,
Or an Initializer that generates a tensor given the shape and type of v.
An "Initializer" is a callable that returns a tensor that "v" should be
set to. It will be called as init(shape, dtype).
name: Optional name for the op.
Returns:
The operation that initializes v.
"""
with ops.name_scope(None, v.op.name + "/", [v, init]):
with ops.name_scope(name) as scope:
with ops.colocate_with(v):
if callable(init):
assert v.get_shape().is_fully_defined(), "Variable shape unknown."
# TODO(mrry): Convert to v.shape when the property and
# accessor are reconciled (and all initializers support
# tf.TensorShape objects).
value = init(v.get_shape().as_list(), v.dtype.base_dtype)
value = ops.convert_to_tensor(value, name="value")
return gen_state_ops.assign(v, value, name=scope)
else:
init = ops.convert_to_tensor(init, name="init")
return gen_state_ops.assign(v, init, name=scope)
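# Illustrative usage sketch (graph mode; names assumed, not part of this module):
#
#   v = variable_op_v2([2, 2], dtypes.float32, name="v")
#   init_op = init_variable(v, tf.ones_initializer())       # callable initializer
#   # or: init_op = init_variable(v, [[1., 2.], [3., 4.]])  # direct value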
def is_variable_initialized(ref, name=None):
"""Checks whether a tensor has been initialized.
Outputs boolean scalar indicating whether the tensor has been initialized.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `bool`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.is_variable_initialized(ref=ref, name=name)
# Handle resource variables.
return ref.is_initialized(name=name)
@tf_export("assign_sub")
def assign_sub(ref, value, use_locking=None, name=None):
"""Update 'ref' by subtracting 'value' from it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the updated value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
The value to be subtracted from the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_sub(
ref, value, use_locking=use_locking, name=name)
return ref.assign_sub(value)
@tf_export("assign_add")
def assign_add(ref, value, use_locking=None, name=None):
"""Update 'ref' by adding 'value' to it.
This operation outputs "ref" after the update is done.
This makes it easier to chain operations that need to use the updated value.
Args:
ref: A mutable `Tensor`. Must be one of the following types:
`float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
`int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
Should be from a `Variable` node.
value: A `Tensor`. Must have the same type as `ref`.
The value to be added to the variable.
use_locking: An optional `bool`. Defaults to `False`.
If True, the addition will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as "ref". Returned as a convenience for operations that want
to use the new value after the variable has been updated.
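For example (illustrative graph-mode usage; `v` and `inc` are example names):
```python
v = tf.Variable(10)
inc = tf.assign_add(v, 5)
# Running `inc` returns 15 and leaves v at 15, so the updated value can
# be consumed directly by downstream ops.
```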
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign_add(
ref, value, use_locking=use_locking, name=name)
return ref.assign_add(value)
@tf_export("assign")
def assign(ref, value, validate_shape=None, use_locking=None, name=None):
"""Update 'ref' by assigning 'value' to it.
This operation outputs a Tensor that holds the new value of 'ref' after
the value has been assigned. This makes it easier to chain operations
that need to use the new value.
Args:
ref: A mutable `Tensor`.
Should be from a `Variable` node. May be uninitialized.
value: A `Tensor`. Must have the same type as `ref`.
The value to be assigned to the variable.
validate_shape: An optional `bool`. Defaults to `True`.
If true, the operation will validate that the shape
of 'value' matches the shape of the Tensor being assigned to. If false,
'ref' will take on the shape of 'value'.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A `Tensor` that will hold the new value of 'ref' after
the assignment has completed.
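For example (illustrative graph-mode usage; `v` and `new_v` are example
names):
```python
v = tf.Variable(1)
new_v = tf.assign(v, 7)
# Running `new_v` yields 7, and v holds 7 afterwards, so downstream ops
# can consume the assigned value directly.
```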
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.assign(
ref, value, use_locking=use_locking, name=name,
validate_shape=validate_shape)
return ref.assign(value, name=name)
@tf_export("count_up_to")
def count_up_to(ref, limit, name=None):
r"""Increments 'ref' until it reaches 'limit'.
Args:
ref: A Variable. Must be one of the following types: `int32`, `int64`.
Should be from a scalar `Variable` node.
limit: An `int`.
If incrementing ref would bring it above limit, instead generates an
'OutOfRange' error.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `ref`.
A copy of the input before increment. If nothing else modifies the
input, the values produced will all be distinct.
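For example (illustrative graph-mode usage; `v` and `c` are example names):
```python
v = tf.Variable(0)
c = tf.count_up_to(v, 3)
# Successive runs of `c` yield 0, 1, 2; a further run raises
# `tf.errors.OutOfRangeError`.
```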
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.count_up_to(ref, limit=limit, name=name)
return gen_state_ops.resource_count_up_to(
ref.handle, limit, T=ref.dtype, name=name)
@tf_export("scatter_update")
def scatter_update(ref, indices, updates, use_locking=True, name=None):
# pylint: disable=line-too-long
r"""Applies sparse updates to a variable reference.
This operation computes
```python
# Scalar indices
ref[indices, ...] = updates[...]
# Vector indices (for each i)
ref[indices[i], ...] = updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the updated value.
If values in `ref` are to be updated more than once, because there are
duplicate entries in `indices`, the order in which the updates happen
for each value is undefined.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt>
</div>
Args:
ref: A `Variable`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
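For example (illustrative graph-mode usage; the names below are example
names):
```python
ref = tf.Variable([1, 2, 3, 4])
indices = tf.constant([0, 2])
updates = tf.constant([9, 10])
op = tf.scatter_update(ref, indices, updates)
# After running `op`, ref holds [9, 2, 10, 4].
```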
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_update(ref, indices, updates,
use_locking=use_locking, name=name)
return ref._lazy_read(gen_resource_variable_ops.resource_scatter_update( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export("scatter_nd_update")
def scatter_nd_update(ref, indices, updates, use_locking=True, name=None):
r"""Applies sparse `updates` to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be an integer tensor containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to update 4 scattered elements of a rank-1 tensor
with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
update = tf.scatter_nd_update(ref, indices, updates)
with tf.Session() as sess:
print(sess.run(update))
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
ref: A Variable.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into ref.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `True`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
The value of the variable after the update.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_nd_update(
ref, indices, updates, use_locking, name)
return ref._lazy_read(gen_state_ops.resource_scatter_nd_update( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export("scatter_add")
def scatter_add(ref, indices, updates, use_locking=False, name=None):
# pylint: disable=line-too-long
r"""Adds sparse updates to the variable referenced by `resource`.
This operation computes
```python
# Scalar indices
ref[indices, ...] += updates[...]
# Vector indices (for each i)
ref[indices[i], ...] += updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the updated value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%" src='https://www.tensorflow.org/images/ScatterAdd.png' alt>
</div>
Args:
ref: A `Variable`.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to store in `ref`.
use_locking: An optional `bool`. Defaults to `False`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
Same as `ref`. Returned as a convenience for operations that want
to use the updated values after the update is done.
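For example (illustrative graph-mode usage; note how duplicate indices
accumulate):
```python
ref = tf.Variable([1, 2, 3])
indices = tf.constant([0, 0, 2])
updates = tf.constant([10, 10, 1])
op = tf.scatter_add(ref, indices, updates)
# After running `op`, ref holds [21, 2, 4].
```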
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_add(ref, indices, updates,
use_locking=use_locking, name=name)
return ref._lazy_read(gen_resource_variable_ops.resource_scatter_add( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export("scatter_nd_add")
def scatter_nd_add(ref, indices, updates, use_locking=False, name=None):
r"""Applies sparse addition to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be an integer tensor containing indices into `ref`.
It must have shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is a `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor
with 8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
add = tf.scatter_nd_add(ref, indices, updates)
with tf.Session() as sess:
print(sess.run(add))
```
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
`qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
`uint32`, `uint64`. A mutable Tensor. Should be from a Variable node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into ref.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to add to ref.
use_locking: An optional `bool`. Defaults to `False`.
If True, the assignment will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_nd_add(
ref, indices, updates, use_locking, name)
return ref._lazy_read(gen_state_ops.resource_scatter_nd_add( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
@tf_export("scatter_sub")
def scatter_sub(ref, indices, updates, use_locking=False, name=None):
r"""Subtracts sparse updates to a variable reference.
```python
# Scalar indices
ref[indices, ...] -= updates[...]
# Vector indices (for each i)
ref[indices[i], ...] -= updates[i, ...]
# High rank indices (for each i, ..., j)
ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
```
This operation outputs `ref` after the update is done.
This makes it easier to chain operations that need to use the updated value.
Duplicate entries are handled correctly: if multiple `indices` reference
the same location, their (negated) contributions add.
Requires `updates.shape = indices.shape + ref.shape[1:]` or
`updates.shape = []`.
<div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;">
<img style="width:100%"
src="https://www.tensorflow.org/images/ScatterSub.png" alt>
</div>
Args:
ref: A mutable `Tensor`. Must be one of the following types: `float32`,
`float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
`qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
`uint32`, `uint64`. Should be from a `Variable` node.
indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
A tensor of indices into the first dimension of `ref`.
updates: A `Tensor`. Must have the same type as `ref`.
A tensor of updated values to subtract from `ref`.
use_locking: An optional `bool`. Defaults to `False`.
If True, the subtraction will be protected by a lock;
otherwise the behavior is undefined, but may exhibit less contention.
name: A name for the operation (optional).
Returns:
A mutable `Tensor`. Has the same type as `ref`.
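For example (illustrative graph-mode usage; duplicate indices accumulate
their negated contributions):
```python
ref = tf.Variable([10, 20, 30])
indices = tf.constant([1, 1])
updates = tf.constant([5, 5])
op = tf.scatter_sub(ref, indices, updates)
# After running `op`, ref holds [10, 10, 30].
```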
"""
if ref.dtype._is_ref_dtype:
return gen_state_ops.scatter_sub(ref, indices, updates,
use_locking=use_locking, name=name)
return ref._lazy_read(gen_resource_variable_ops.resource_scatter_sub( # pylint: disable=protected-access
ref.handle, indices, ops.convert_to_tensor(updates, ref.dtype),
name=name))
from datetime import date
import dateutil
import numpy as np
import pytest
from pandas.compat import lrange
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, Index, Timestamp, date_range, offsets)
import pandas.util.testing as tm
from pandas.util.testing import assert_almost_equal
randn = np.random.randn
class TestDatetimeIndex(object):
def test_roundtrip_pickle_with_tz(self):
# GH 8367
# round-trip of timezone
index = date_range('20130101', periods=3, tz='US/Eastern', name='foo')
unpickled = tm.round_trip_pickle(index)
tm.assert_index_equal(index, unpickled)
def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):
# GH7774
index = date_range('20130101', periods=3, tz='US/Eastern')
assert str(index.reindex([])[0].tz) == 'US/Eastern'
assert str(index.reindex(np.array([]))[0].tz) == 'US/Eastern'
def test_time_loc(self): # GH8667
from datetime import time
from pandas._libs.index import _SIZE_CUTOFF
ns = _SIZE_CUTOFF + np.array([-100, 100], dtype=np.int64)
key = time(15, 11, 30)
start = key.hour * 3600 + key.minute * 60 + key.second
step = 24 * 3600
for n in ns:
idx = pd.date_range('2014-11-26', periods=n, freq='S')
ts = pd.Series(np.random.randn(n), index=idx)
i = np.arange(start, n, step)
tm.assert_numpy_array_equal(ts.index.get_loc(key), i,
check_dtype=False)
tm.assert_series_equal(ts[key], ts.iloc[i])
left, right = ts.copy(), ts.copy()
left[key] *= -10
right.iloc[i] *= -10
tm.assert_series_equal(left, right)
def test_time_overflow_for_32bit_machines(self):
# GH8943. On some machines NumPy defaults to np.int32 (for example,
# 32-bit Linux machines). In the function _generate_regular_range
# found in tseries/index.py, `periods` gets multiplied by `strides`
# (which has value 1e9) and since the max value for np.int32 is ~2e9,
# and since those machines won't promote np.int32 to np.int64, we get
# overflow.
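# For instance, np.int32(1000) * np.int32(10 ** 9) wraps around, since the
# int32 maximum is 2**31 - 1 (about 2.1e9), whereas int64 arithmetic does not.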
periods = np.int_(1000)
idx1 = pd.date_range(start='2000', periods=periods, freq='S')
assert len(idx1) == periods
idx2 = pd.date_range(end='2000', periods=periods, freq='S')
assert len(idx2) == periods
def test_nat(self):
assert DatetimeIndex([np.nan])[0] is pd.NaT
def test_week_of_month_frequency(self):
# GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise
d1 = date(2002, 9, 1)
d2 = date(2013, 10, 27)
d3 = date(2012, 9, 30)
idx1 = DatetimeIndex([d1, d2])
idx2 = DatetimeIndex([d3])
result_append = idx1.append(idx2)
expected = DatetimeIndex([d1, d2, d3])
tm.assert_index_equal(result_append, expected)
result_union = idx1.union(idx2)
expected = DatetimeIndex([d1, d3, d2])
tm.assert_index_equal(result_union, expected)
# GH 5115
result = date_range("2013-1-1", periods=4, freq='WOM-1SAT')
dates = ['2013-01-05', '2013-02-02', '2013-03-02', '2013-04-06']
expected = DatetimeIndex(dates, freq='WOM-1SAT')
tm.assert_index_equal(result, expected)
def test_hash_error(self):
index = date_range('20010101', periods=10)
with pytest.raises(TypeError, match=("unhashable type: %r" %
type(index).__name__)):
hash(index)
def test_stringified_slice_with_tz(self):
# GH#2658
import datetime
start = datetime.datetime.now()
idx = date_range(start=start, freq="1d", periods=10)
df = DataFrame(lrange(10), index=idx)
df["2013-01-14 23:44:34.437768-05:00":] # no exception here
def test_append_join_nondatetimeindex(self):
rng = date_range('1/1/2000', periods=10)
idx = Index(['a', 'b', 'c', 'd'])
result = rng.append(idx)
assert isinstance(result[0], Timestamp)
# it works
rng.join(idx, how='outer')
def test_map(self):
rng = date_range('1/1/2000', periods=10)
f = lambda x: x.strftime('%Y%m%d')
result = rng.map(f)
exp = Index([f(x) for x in rng], dtype='<U8')
tm.assert_index_equal(result, exp)
def test_map_fallthrough(self, capsys):
# GH#22067, check we don't get warnings about silently ignored errors
dti = date_range('2017-01-01', '2018-01-01', freq='B')
dti.map(lambda x: pd.Period(year=x.year, month=x.month, freq='M'))
captured = capsys.readouterr()
assert captured.err == ''
def test_iteration_preserves_tz(self):
# see gh-8890
index = date_range("2012-01-01", periods=3, freq='H', tz='US/Eastern')
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result == expected
index = date_range("2012-01-01", periods=3, freq='H',
tz=dateutil.tz.tzoffset(None, -28800))
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
# 9100
index = pd.DatetimeIndex(['2014-12-01 03:32:39.987000-08:00',
'2014-12-01 04:12:34.987000-08:00'])
for i, ts in enumerate(index):
result = ts
expected = index[i]
assert result._repr_base == expected._repr_base
assert result == expected
@pytest.mark.parametrize('periods', [0, 9999, 10000, 10001])
def test_iteration_over_chunksize(self, periods):
# GH21012
index = date_range('2000-01-01 00:00:00', periods=periods, freq='min')
num = 0
for stamp in index:
assert index[num] == stamp
num += 1
assert num == len(index)
def test_misc_coverage(self):
rng = date_range('1/1/2000', periods=5)
result = rng.groupby(rng.day)
assert isinstance(list(result.values())[0][0], Timestamp)
idx = DatetimeIndex(['2000-01-03', '2000-01-01', '2000-01-02'])
assert not idx.equals(list(idx))
non_datetime = Index(list('abc'))
assert not idx.equals(list(non_datetime))
def test_string_index_series_name_converted(self):
# #1644
df = DataFrame(np.random.randn(10, 4),
index=date_range('1/1/2000', periods=10))
result = df.loc['1/3/2000']
assert result.name == df.index[2]
result = df.T['1/3/2000']
assert result.name == df.index[2]
def test_get_duplicates(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-02',
'2000-01-03', '2000-01-03', '2000-01-04'])
with tm.assert_produces_warning(FutureWarning):
# Deprecated - see GH20239
result = idx.get_duplicates()
ex = DatetimeIndex(['2000-01-02', '2000-01-03'])
tm.assert_index_equal(result, ex)
def test_argmin_argmax(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_sort_values(self):
idx = DatetimeIndex(['2000-01-04', '2000-01-01', '2000-01-02'])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))
def test_map_bug_1677(self):
index = DatetimeIndex(['2012-04-25 09:30:00.393000'])
f = index.asof
result = index.map(f)
expected = Index([f(index[0])])
tm.assert_index_equal(result, expected)
def test_groupby_function_tuple_1677(self):
df = DataFrame(np.random.rand(100),
index=date_range("1/1/2000", periods=100))
monthly_group = df.groupby(lambda x: (x.year, x.month))
result = monthly_group.mean()
assert isinstance(result.index[0], tuple)
def test_append_numpy_bug_1681(self):
# another datetime64 bug
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
a = DataFrame()
c = DataFrame({'A': 'foo', 'B': dr}, index=dr)
result = a.append(c)
assert (result['B'] == dr).all()
def test_isin(self):
index = tm.makeDateIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
assert_almost_equal(index.isin([index[2], 5]),
np.array([False, False, True, False]))
def test_does_not_convert_mixed_integer(self):
df = tm.makeCustomDataframe(10, 10,
data_gen_f=lambda *args, **kwargs: randn(),
r_idx_type='i', c_idx_type='dt')
cols = df.columns.join(df.index, how='outer')
joined = cols.join(df.columns)
assert cols.dtype == np.dtype('O')
assert cols.dtype == joined.dtype
tm.assert_numpy_array_equal(cols.values, joined.values)
def test_join_self(self, join_type):
index = date_range('1/1/2000', periods=10)
joined = index.join(index, how=join_type)
assert index is joined
def assert_index_parameters(self, index):
assert index.freq == '40960N'
assert index.inferred_freq == '40960N'
def test_ns_index(self):
nsamples = 400
ns = int(1e9 / 24414)
dtstart = np.datetime64('2012-09-20T00:00:00')
dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, 'ns')
freq = ns * offsets.Nano()
index = pd.DatetimeIndex(dt, freq=freq, name='time')
self.assert_index_parameters(index)
new_index = pd.date_range(start=index[0], end=index[-1],
freq=index.freq)
self.assert_index_parameters(new_index)
def test_join_with_period_index(self, join_type):
df = tm.makeCustomDataframe(
10, 10, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:5, 0]
msg = 'can only call with other PeriodIndex-ed objects'
with pytest.raises(ValueError, match=msg):
df.columns.join(s.index, how=join_type)
def test_factorize(self):
idx1 = DatetimeIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# tz must be preserved
idx1 = idx1.tz_localize('Asia/Tokyo')
exp_idx = exp_idx.tz_localize('Asia/Tokyo')
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
idx2 = pd.DatetimeIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'])
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-01', '2014-02', '2014-03'])
arr, idx = idx2.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
exp_idx = DatetimeIndex(['2014-03', '2014-02', '2014-01'])
arr, idx = idx2.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = date_range('2000-01', periods=4, freq='M', tz='Asia/Tokyo')
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_factorize_tz(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH#13750
base = pd.date_range('2016-11-05', freq='H', periods=100, tz=tz)
idx = base.repeat(5)
exp_arr = np.arange(100, dtype=np.intp).repeat(5)
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(res, base)
def test_factorize_dst(self):
# GH 13750
idx = pd.date_range('2016-11-06', freq='H', periods=12,
tz='US/Eastern')
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
idx = pd.date_range('2016-06-13', freq='H', periods=12,
tz='US/Eastern')
for obj in [idx, pd.Series(idx)]:
arr, res = obj.factorize()
tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
tm.assert_index_equal(res, idx)
@pytest.mark.parametrize('arr, expected', [
(pd.DatetimeIndex(['2017', '2017']), pd.DatetimeIndex(['2017'])),
(pd.DatetimeIndex(['2017', '2017'], tz='US/Eastern'),
pd.DatetimeIndex(['2017'], tz='US/Eastern')),
])
def test_unique(self, arr, expected):
result = arr.unique()
tm.assert_index_equal(result, expected)
# GH 21737
# Ensure the underlying data is consistent
assert result[0] == expected[0]
def test_asarray_tz_naive(self):
# This shouldn't produce a warning.
idx = pd.date_range('2000', periods=2)
# M8[ns] by default
with tm.assert_produces_warning(None):
result = np.asarray(idx)
expected = np.array(['2000-01-01', '2000-01-02'], dtype='M8[ns]')
tm.assert_numpy_array_equal(result, expected)
# optionally, object
with tm.assert_produces_warning(None):
result = np.asarray(idx, dtype=object)
expected = np.array([pd.Timestamp('2000-01-01'),
pd.Timestamp('2000-01-02')])
tm.assert_numpy_array_equal(result, expected)
def test_asarray_tz_aware(self):
tz = 'US/Central'
idx = pd.date_range('2000', periods=2, tz=tz)
expected = np.array(['2000-01-01T06', '2000-01-02T06'], dtype='M8[ns]')
# We warn by default and return an ndarray[M8[ns]]
with tm.assert_produces_warning(FutureWarning):
result = np.asarray(idx)
tm.assert_numpy_array_equal(result, expected)
# Old behavior with no warning
with tm.assert_produces_warning(None):
result = np.asarray(idx, dtype="M8[ns]")
tm.assert_numpy_array_equal(result, expected)
# Future behavior with no warning
expected = np.array([pd.Timestamp("2000-01-01", tz=tz),
pd.Timestamp("2000-01-02", tz=tz)])
with tm.assert_produces_warning(None):
result = np.asarray(idx, dtype=object)
tm.assert_numpy_array_equal(result, expected)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connect to other servers
# `server` means the UDP server that handles user requests
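# For example, a (pre-encryption) shadowsocks UDP request for destination
# 1.2.3.4:53 carrying b'data' can be sketched with helpers imported below
# (`pack_addr`, `struct`), mirroring how _handle_client builds responses:
#
#     packet = pack_addr(b'1.2.3.4') + struct.pack('>H', 53) + b'data'
#
# i.e. ATYP+DST.ADDR, then a 2-byte big-endian DST.PORT, then DATA.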
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import socket
import logging
import struct
import errno
import random
from shadowsocks import encrypt, eventloop, lru_cache, common, shell
from shadowsocks.common import parse_header, pack_addr, onetimeauth_verify, \
onetimeauth_gen, ONETIMEAUTH_BYTES, ADDRTYPE_AUTH
BUF_SIZE = 65536
def client_key(source_addr, server_af):
# notice this is server af, not dest af
return '%s:%s:%d' % (source_addr[0], source_addr[1], server_af)
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
if 'one_time_auth' in config and config['one_time_auth']:
self._one_time_auth_enable = True
else:
self._one_time_auth_enable = False
self._is_local = is_local
self._cache = lru_cache.LRUCache(timeout=config['timeout'],
close_callback=self._close_client)
self._client_fd_to_server_addr = \
lru_cache.LRUCache(timeout=config['timeout'])
self._dns_cache = lru_cache.LRUCache(timeout=300)
self._eventloop = None
self._closed = False
self._sockets = set()
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("UDP can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _close_client(self, client):
if hasattr(client, 'close'):
self._sockets.remove(client.fileno())
self._eventloop.remove(client)
client.close()
else:
# just an address
pass
def _handle_server(self):
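# Read one datagram from the listening socket. On the local side, strip
# the 3-byte SOCKS5 UDP header (RSV + FRAG); on the server side, decrypt
# and, when enabled, verify the one-time-auth tag. The payload is then
# forwarded toward its destination through a cached per-client UDP socket.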
server = self._server_socket
data, r_addr = server.recvfrom(BUF_SIZE)
key = None
iv = None
if not data:
logging.debug('UDP handle_server: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if self._is_local:
frag = common.ord(data[2])
if frag != 0:
logging.warn('UDP drop a message since frag is not 0')
return
else:
data = data[3:]
else:
data, key, iv = encrypt.dencrypt_all(self._password,
self._method,
data)
# decrypt data
if not data:
logging.debug(
'UDP handle_server: data is empty after decrypt'
)
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
if self._is_local:
server_addr, server_port = self._get_a_server()
else:
server_addr, server_port = dest_addr, dest_port
# spec https://shadowsocks.org/en/spec/one-time-auth.html
if self._one_time_auth_enable or addrtype & ADDRTYPE_AUTH:
if len(data) < header_length + ONETIMEAUTH_BYTES:
logging.warn('UDP one time auth header is too short')
return
_hash = data[-ONETIMEAUTH_BYTES:]
data = data[: -ONETIMEAUTH_BYTES]
_key = iv + key
if onetimeauth_verify(_hash, data, _key) is False:
logging.warn('UDP one time auth fail')
return
addrs = self._dns_cache.get(server_addr, None)
if addrs is None:
addrs = socket.getaddrinfo(server_addr, server_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if not addrs:
# drop
return
else:
self._dns_cache[server_addr] = addrs
af, socktype, proto, canonname, sa = addrs[0]
key = client_key(r_addr, af)
client = self._cache.get(key, None)
if not client:
# TODO async getaddrinfo
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
logging.debug('IP %s is in forbidden list, drop' %
common.to_str(sa[0]))
# drop
return
client = socket.socket(af, socktype, proto)
client.setblocking(False)
self._cache[key] = client
self._client_fd_to_server_addr[client.fileno()] = r_addr
self._sockets.add(client.fileno())
self._eventloop.add(client, eventloop.POLL_IN, self)
if self._is_local:
key, iv, m = encrypt.gen_key_iv(self._password, self._method)
# spec https://shadowsocks.org/en/spec/one-time-auth.html
if self._one_time_auth_enable:
data = self._ota_chunk_data_gen(key, iv, data)
data = encrypt.encrypt_all_m(key, iv, m, self._method, data)
if not data:
return
else:
data = data[header_length:]
if not data:
return
try:
client.sendto(data, (server_addr, server_port))
except (socket.error, OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if sys.platform == "win32":
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK, errno.WSAEWOULDBLOCK):
pass
else:
shell.print_exception(e)
elif error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
pass
else:
shell.print_exception(e)
def _handle_client(self, sock):
data, r_addr = sock.recvfrom(BUF_SIZE)
if not data:
logging.debug('UDP handle_client: data is empty')
return
if self._stat_callback:
self._stat_callback(self._listen_port, len(data))
if not self._is_local:
addrlen = len(r_addr[0])
if addrlen > 255:
# drop
return
data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
response = encrypt.encrypt_all(self._password, self._method, 1,
data)
if not response:
return
else:
data = encrypt.encrypt_all(self._password, self._method, 0,
data)
if not data:
return
header_result = parse_header(data)
if header_result is None:
return
addrtype, dest_addr, dest_port, header_length = header_result
response = b'\x00\x00\x00' + data
client_addr = self._client_fd_to_server_addr.get(sock.fileno())
if client_addr:
self._server_socket.sendto(response, client_addr)
else:
# this packet is from somewhere else we don't know about,
# simply drop that packet
pass
def _ota_chunk_data_gen(self, key, iv, data):
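# Set the ADDRTYPE_AUTH bit on the address-type byte, then append a
# one-time-auth tag computed over the data with key = iv + pre-shared key.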
data = common.chr(common.ord(data[0]) | ADDRTYPE_AUTH) + data[1:]
key = iv + key
return data + onetimeauth_gen(data, key)
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already added to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def handle_event(self, sock, fd, event):
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
logging.error('UDP server_socket err')
self._handle_server()
elif sock and (fd in self._sockets):
if event & eventloop.POLL_ERR:
logging.error('UDP client_socket err')
self._handle_client(sock)
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._server_socket.close()
self._server_socket = None
for sock in self._sockets:
sock.close()
logging.info('closed UDP port %d', self._listen_port)
self._cache.sweep()
self._client_fd_to_server_addr.sweep()
self._dns_cache.sweep()
def close(self, next_tick=False):
logging.debug('UDP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for client in list(self._cache.values()):
client.close()
#!/usr/bin/env python
import argparse
import os
import sys
import json
import math
import logging
import shutil
import utils
import re
LOGGER = None
def merge_results(outdir, varsim_tp, varsim_fn, vcfeval_tp,
varsim_fp, vcfeval_tp_predict):
'''
generate augmented TP, FN, FP
:param varsim_tp:
:param varsim_fn:
:param vcfeval_tp:
:param varsim_fp:
:param vcfeval_tp_predict:
:return:
'''
#some implementation philosophy (subject to change)
#retain any variant recognized by VarSim (they might not be recognized by vcfeval, e.g. <DUP>)
#assume variants are uniquely identifiable by chr+pos+ref+alt
#varsim_tp + vcfeval_tp = augmented_tp
#varsim_tp + varsim_fn = T
#T - augmented_tp = augmented_fn
#varsim_fp - vcfeval_tp_predict = augmented_fp
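#worked example of the set algebra above: varsim_tp={A}, vcfeval_tp={B},
#varsim_fn={B,C}, varsim_fp={D,E}, vcfeval_tp_predict={E} would yield
#augmented_tp={A,B}, T={A,B,C}, augmented_fn={C}, augmented_fp={D}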
augmented_tp = os.path.join(outdir, "merge_tp.vcf")
augmented_t = os.path.join(outdir, "merge_t.vcf")
augmented_fn = os.path.join(outdir, "merge_fn.vcf")
augmented_fp = os.path.join(outdir, "merge_fp.vcf")
augmented_tp = utils.combine_vcf(augmented_tp, [varsim_tp, vcfeval_tp], duplicate_handling_mode=utils.COMBINE_KEEP_FIRST_DUPLICATE)
augmented_t = utils.combine_vcf(augmented_t, [varsim_tp, varsim_fn], duplicate_handling_mode=utils.COMBINE_KEEP_FIRST_DUPLICATE)
#assumption: augmented_tp is subset of augmented_t
augmented_fn = utils.combine_vcf(augmented_fn, [augmented_t, augmented_tp], duplicate_handling_mode=utils.COMBINE_KEEP_NO_DUPLICATE)
#assumption: vcfeval_tp_predict is subset of varsim_fp
augmented_fp = utils.combine_vcf(augmented_fp, [varsim_fp, vcfeval_tp_predict], duplicate_handling_mode=utils.COMBINE_KEEP_NO_DUPLICATE)
return augmented_tp, augmented_fn, augmented_fp, augmented_t
class VCFComparator(object):
def __init__(self, prefix, true_vcf, reference, regions, sample, vcfs, exclude_filtered, match_geno, log_to_file, opts, java = "java"):
self.prefix = prefix
self.true_vcf = true_vcf
self.reference = reference
self.sample = sample
self.vcfs = vcfs
self.exclude_filtered = exclude_filtered
self.match_geno = match_geno
self.log_to_file = log_to_file
self.regions = regions
self.opts = opts #additional options
self.tp,self.tp_predict,self.fp,self.fn = None, None, None, None
self.java = java
def run(self):
'''
generate TP, FN, FP
:return:
'''
pass
def get_tp(self):
'''
:return: TP (based on truth) file
'''
if not self.tp:
self.run()
return self.tp
def get_tp_predict(self):
'''
:return: TP (based on prediction) file
'''
if not self.tp_predict:
self.run()
return self.tp_predict
def get_fp(self):
'''
:return: FP file
'''
if not self.fp:
self.run()
return self.fp
def get_fn(self):
'''
:return: FN file
'''
if not self.fn:
self.run()
return self.fn
class VarSimVCFComparator(VCFComparator):
def __init__(self, prefix, true_vcf, reference, regions, sample, vcfs, exclude_filtered, disallow_partial_fp, match_geno, log_to_file, opts, java = 'java', sv_length = 100,
ignore_ins_len = False):
VCFComparator.__init__(self, prefix, true_vcf, reference, regions, sample, vcfs, exclude_filtered, match_geno, log_to_file, opts, java)
self.disallow_partial_fp = disallow_partial_fp
self.sv_length = sv_length
self.ignore_ins_len = ignore_ins_len
def get_tp_predict(self):
'''
varsim does not generate TP based off of predictions
:return:
'''
return None
def run(self):
'''
:return:
'''
cmd = [self.java, utils.JAVA_XMX, '-jar', utils.VARSIMJAR, 'vcfcompare',
'-prefix', self.prefix, '-true_vcf',
self.true_vcf,
'-reference', self.reference,
]
if self.exclude_filtered:
cmd.append('-exclude_filtered')
if self.match_geno:
cmd.append('-match_geno')
if self.sample:
cmd.append('-sample')
cmd.append(self.sample)
if self.regions:
cmd.append('-bed')
cmd.append(self.regions)
if self.disallow_partial_fp:
cmd.append('-disallow_partial_fp')
if self.sv_length is not None:
cmd.append('-sv_length {}'.format(self.sv_length))
if self.ignore_ins_len:
cmd.append('-ignore_ins_len')
if self.opts:
cmd.append(self.opts)
cmd.extend(self.vcfs)
if self.log_to_file:
with utils.versatile_open(self.log_to_file, 'a') as logout:
utils.run_shell_command(cmd, sys.stdout, logout)
else:
utils.run_shell_command(cmd, sys.stdout, sys.stderr)
tp = self.prefix + '_TP.vcf'
fn = self.prefix + '_FN.vcf'
fp = self.prefix + '_FP.vcf'
for i in (tp, fn, fp):
if not os.path.exists(i):
raise Exception('{0} was not generated by VarSim vcfcompare. Please check and rerun.'.format(i))
self.tp, self.fn, self.fp = tp, fn, fp
class RTGVCFComparator(VCFComparator):
def run(self):
'''
:return:
'''
#command example
#rtg-tools-3.8.4-bdba5ea_install/rtg vcfeval --baseline truth.vcf.gz \
#--calls compare1.vcf.gz -o vcfeval_split_snp -t ref.sdf --output-mode=annotate --sample xx --squash-ploidy --regions ?? \
cmd = [self.java, utils.JAVA_XMX, '-jar', utils.RTGJAR, 'vcfeval',
'-o', self.prefix, '--baseline',
self.true_vcf,
'-t', self.reference,
]
if not self.exclude_filtered:
cmd.append('--all-records')
if not self.match_geno:
cmd.append('--squash-ploidy')
if self.sample:
cmd.append('--sample')
cmd.append(self.sample)
if self.regions:
cmd.append('--bed-regions')
cmd.append(self.regions)
if self.opts:
cmd.append(self.opts)
if len(self.vcfs) != 1:
raise ValueError('vcfeval only takes 1 prediction VCF and 1 truth VCF: {0}'.format(self.vcfs))
cmd.append('--calls')
cmd.append(self.vcfs[0])
tp = os.path.join(self.prefix, 'tp-baseline.vcf.gz')
tp_predict = os.path.join(self.prefix, 'tp.vcf.gz')
fn = os.path.join(self.prefix, 'fn.vcf.gz')
fp = os.path.join(self.prefix, 'fp.vcf.gz')
#vcfeval refuses to run if true_vcf contains 0 variants
if utils.count_variants(self.true_vcf) == 0:
utils.makedirs([self.prefix])
#because there is 0 ground truth variants, TP and FN will be empty
shutil.copyfile(self.true_vcf, tp)
shutil.copyfile(self.true_vcf, fn)
if utils.count_variants(self.vcfs[0]) == 0:
#if calls are empty, then TP_PREDICT and FP will for sure be empty
shutil.copyfile(self.vcfs[0], tp_predict)
shutil.copyfile(self.vcfs[0], fp)
else:
#if calls are not empty, then all calls will be FP due to 0 ground truth, TP_PREDICT will be empty
shutil.copyfile(self.vcfs[0], fp)
with utils.versatile_open(tp_predict, "w") as output, utils.versatile_open(self.vcfs[0], "r") as input:
for i in input:
if i.startswith('#'):
output.write(i)
else:
break
else:
if self.log_to_file:
with utils.versatile_open(self.log_to_file, 'a') as logout:
utils.run_shell_command(cmd, sys.stderr, logout)
else:
utils.run_shell_command(cmd, sys.stderr, sys.stderr)
for i in (tp, tp_predict, fn, fp):
if not os.path.exists(i):
raise Exception('{0} was not generated by vcfeval. Please check and rerun.'.format(i))
self.tp, self.tp_predict, self.fn, self.fp = tp, tp_predict, fn, fp
def generate_sdf(reference, log, java = 'java'):
'''
take reference and generate SDF
:param reference:
:return:
'''
sdf = reference + '.sdf'
if os.path.exists(sdf):
LOGGER.info('{0} exists, doing nothing'.format(sdf))
LOGGER.info('to rerun SDF generation, please remove or rename {0}'.format(sdf))
return sdf
cmd = [java, utils.JAVA_XMX, '-jar',utils.RTGJAR,'format',
'-o', sdf, reference]
if log:
with utils.versatile_open(log, 'a') as logout:
utils.run_shell_command(cmd, logout, logout)
else:
utils.run_shell_command(cmd, sys.stdout, sys.stderr)
return sdf
def process(args):
'''
main
:param args:
:return:
'''
args.java = utils.get_java(args.java)
utils.check_java(args.java)
# Setup logging
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
loglevel = utils.get_loglevel(args.loglevel)
if args.log_to_file:
logging.basicConfig(filename=args.log_to_file, filemode="w", level=loglevel, format=FORMAT)
else:
logging.basicConfig(level=loglevel, format=FORMAT)
if len(args.vcfs) > 1:
raise NotImplementedError('only one prediction VCF is supported right now. Quick workaround: src/sort_vcf.sh vcf1 vcf2 > merged.vcf')
global LOGGER
LOGGER = logging.getLogger(__name__)
LOGGER.info('working hard ...')
utils.JAVA_XMX = utils.JAVA_XMX + args.java_max_mem
args.out_dir = os.path.abspath(args.out_dir)
args.reference = os.path.abspath(args.reference)
utils.makedirs([args.out_dir])
varsim_prefix = os.path.join(args.out_dir, 'varsim_compare_results')
varsim_comparator = VarSimVCFComparator(prefix=varsim_prefix, true_vcf = args.true_vcf, reference = args.reference,
regions = None,
sample = args.sample, vcfs = args.vcfs,
exclude_filtered = args.exclude_filtered,
disallow_partial_fp = args.disallow_partial_fp,
match_geno = args.match_geno, log_to_file= args.log_to_file, opts = args.vcfcompare_options, java = args.java,
sv_length=args.sv_length)
varsim_tp, varsim_fn, varsim_fp = varsim_comparator.get_tp(), varsim_comparator.get_fn(), varsim_comparator.get_fp()
varsim_tp = utils.sort_and_compress(varsim_tp)
varsim_fn = utils.sort_and_compress(varsim_fn)
varsim_fp = utils.sort_and_compress(varsim_fp)
#run vcfeval
sdf = args.sdf
if not sdf:
LOGGER.info("user did not supply SDF-formatted reference, trying to generate one...")
sdf = generate_sdf(args.reference, args.log_to_file, java = args.java)
'''for vcfeval
sample column must be present, and not empty
if single-sample vcf, vcfeval doesn't check if samples match in truth and call
in multi-sample vcf, sample name must be specified
right now
'''
vcfeval_prefix = os.path.join(args.out_dir, 'vcfeval_compare_results')
if os.path.exists(vcfeval_prefix):
LOGGER.warn('{0} exists, removing ...'.format(vcfeval_prefix))
shutil.rmtree(vcfeval_prefix)
vcfeval_comparator = RTGVCFComparator(prefix=vcfeval_prefix, true_vcf = varsim_fn, reference = sdf,
regions = None,
sample = args.sample, vcfs = [varsim_fp],
exclude_filtered = args.exclude_filtered,
match_geno = args.match_geno, log_to_file= args.log_to_file,
opts = args.vcfeval_options, java = args.java)
vcfeval_tp, vcfeval_tp_predict = vcfeval_comparator.get_tp(), vcfeval_comparator.get_tp_predict()
augmented_tp, augmented_fn, augmented_fp, augmented_t = merge_results(
outdir = args.out_dir,
varsim_tp = varsim_tp, varsim_fn = varsim_fn,
vcfeval_tp = vcfeval_tp, varsim_fp = varsim_fp, vcfeval_tp_predict = vcfeval_tp_predict)
augmented_tp, augmented_fn, augmented_fp, augmented_t = summarize_results(os.path.join(args.out_dir,"augmented"), augmented_tp, augmented_fn, augmented_fp, augmented_t,
var_types= args.var_types, sv_length= args.sv_length, regions = args.regions, bed_either = args.bed_either, java = args.java, bin_breaks = args.bin_breaks)
if args.master_vcf and args.call_vcf:
match_false(augmented_fp, [args.call_vcf, args.master_vcf, augmented_fn], args.out_dir, args.sample, args.log_to_file, args.vcfeval_options, sdf, args.java)
match_false(augmented_fn, [args.call_vcf], args.out_dir, args.sample, args.log_to_file, args.vcfeval_options, sdf, args.java)
LOGGER.info("Variant comparison done.\nTrue positive: {0}\nFalse negative: {1}\nFalse positive: {2}\n".
format(augmented_tp, augmented_fn, augmented_fp))
def match_false(augmented_file, files_to_pair_with, out_dir, sample, log_to_file, vcfeval_options, sdf, java = "java"):
"""Try to pair up each false call in a file (augmented_file) with a variant in the other files provided in a list (files_to_pair_with) to create an annotated version of the first file.
By default the first variant in the list is provided to get an AF, the 2nd to determine the simulated variant (for false positives) and the 3rd to determine if a false positive is
a pure false positive (not simulated) or not (wrong genotype)"""
files_to_pair_with_clean = []
for item in files_to_pair_with:
files_to_pair_with_clean.append(utils.make_clean_vcf(item, out_dir))
content = []
annotated_content = []
with utils.versatile_open(augmented_file, "rt") as augmented_file_handle:
for line in augmented_file_handle.readlines():
line_strip = line.strip()
line_split = line_strip.split()
if line_strip[0] == "#":
annotated_content.append(line_strip)
content.append(line_strip)
else:
if content[-1][0] != "#":
del content[-1]
content.append(line_strip)
single_var_file = utils.write_vcf(content, os.path.join(out_dir, "single.vcf"))
single_var_file = utils.sort_and_compress(single_var_file)
single_var_chr = line_split[0]
info = ''
for i, item in enumerate(files_to_pair_with_clean):
nonmatching_gt_variant = None
if item:
vcfeval_prefix = os.path.join(out_dir, 'vcfeval_compare_results_annotate')
#Restrict the comparison to just the chromosome of the single variant by creating a filtered comparison file
filtered_true_vcf = utils.write_filtered_vcf(item, single_var_chr, os.path.join(out_dir, "filtered.vcf"))
filtered_true_vcf = utils.sort_and_compress(filtered_true_vcf)
vcfeval_comparator = RTGVCFComparator(prefix=vcfeval_prefix, true_vcf = filtered_true_vcf, reference = sdf,
regions = None,
sample = sample, vcfs = [single_var_file],
exclude_filtered = False,
match_geno = False,
log_to_file= log_to_file,
opts = vcfeval_options, java = java)
nonmatching_gt_variant = utils.get_closest_variant(line_split, vcfeval_comparator.get_tp())
#if not nonmatching_gt_variant, check for matching alt and ref at the same position. Example of when this could be applicable is a 0/0 call when vcfeval will not pair up variants at the same locus with the same alt and ref even with match_geno=False
if not nonmatching_gt_variant:
nonmatching_gt_variant = utils.get_matching_alt_ref(line_split, filtered_true_vcf)
#clean up
if os.path.exists(vcfeval_prefix):
LOGGER.warn('{0} exists, removing ...'.format(vcfeval_prefix))
shutil.rmtree(vcfeval_prefix)
if i == 0:
AO_RO_DP_AD = {"AO": None, "RO": None, "DP": None, "AD": None}
if nonmatching_gt_variant:
for entry in AO_RO_DP_AD:
AO_RO_DP_AD[entry] = utils.get_info(nonmatching_gt_variant, entry)
# gatk4 format
if AO_RO_DP_AD["AD"]:
AD_split = AO_RO_DP_AD["AD"].split(',')
AO = list(map(int, AD_split[1:]))
RO = int(AD_split[0])
for j, alt_count in enumerate(AO):
comma = ',' if j < len(AO) - 1 else ''
if alt_count + RO == 0:
info += "0.0" + comma
else:
info += str(float(alt_count) / (alt_count + RO)) + comma
#freebayes
elif AO_RO_DP_AD["AO"] and AO_RO_DP_AD["RO"]:
for i, item in enumerate(AO_RO_DP_AD["AO"].split(',')):
comma = ',' if i < len(AO_RO_DP_AD["AO"].split(','))-1 else ''
denominator = int(item)+int(AO_RO_DP_AD["RO"])
if denominator == 0:
info += "0.0" + comma
else:
info += str(float(item)/denominator) + comma
else:
info += "N/A"
info += ';'
info += "N/A" if not AO_RO_DP_AD["DP"] else str(AO_RO_DP_AD["DP"])
info += ';'
elif i == 1:
if nonmatching_gt_variant:
info += nonmatching_gt_variant[0]+'_'+nonmatching_gt_variant[1]+'_'+nonmatching_gt_variant[3]+'_'+nonmatching_gt_variant[4]+'_'+nonmatching_gt_variant[-1] + ";"
else:
info += "N/A;"
elif i == 2:
info += "pure;" if not nonmatching_gt_variant else "not;"
line_split[6] = info
annotated_content.append('\t'.join(line_split))
#clean up
for fil in [single_var_file, filtered_true_vcf]:
if os.path.isfile(fil):
os.remove(fil)
os.remove(fil+".tbi")
annotated_file = utils.write_vcf(annotated_content, os.path.join(out_dir, "{}_annotated.vcf".format(os.path.splitext(os.path.splitext(os.path.basename(augmented_file))[0])[0])))
annotated_file = utils.sort_and_compress(annotated_file)
#clean up
for item in files_to_pair_with_clean:
if item and os.path.isfile(item):
os.remove(item)
os.remove(item+".tbi")
def print_stats(stats):
'''
print nice stats
adapted from Roger Liu's code.
'''
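#recall = tp/t, precision = tp/(tp+fp), F1 = 2*r*p/(r+p);
#e.g. tp=8, t=10, fp=2 -> recall 80%, precision 80%, F1 80%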
print ("{0: <15}\t{1: <10}\t{2: <10}\t{3: <10}\t{4: <5}\t{5: <5}\t{6: <5}".format("VariantType","Recall","Precision","F1", "TP","T", "FP"))
for vartype, value in stats.items():
try:
recall = value['tp'] / float(value['t']) if float(value['t']) != 0 else float('NaN')
precision = float(value['tp']) / (value['tp'] + value['fp']) if value['tp'] + value['fp'] != 0 else float('NaN')
f1 = float('NaN') if math.isnan(recall) or math.isnan(precision) or (recall + precision) == 0 else 2 * recall * precision / (recall + precision)
except ValueError:
sys.stderr.write("invalid values\n")
#precision 00.00000% to handle (in worst case) 1 out of 3 million mutations in human genome
print ("{0: <15}\t{1:.5%}\t{2:.5%}\t{3:.5%}\t{4:<5}\t{5:<5}\t{6: <5}".format(vartype, recall, precision, f1, value['tp'], value['t'], value['fp']))
def parse_jsons(jsonfile, stats, count_sv = False, count_all = False):
'''
parse json, extract T, TP, FP stats for various variant types
adapted from Roger Liu's code.
:param jsonfile:
:param stats:
:param count_sv:
:param count_all:
:return:
'''
var_types = list(stats.keys())
metrics = list(stats[var_types[0]].keys())
with utils.versatile_open(jsonfile, 'r') as fh:
data = json.load(fh)
for vt in var_types:
if vt in data['num_true_correct']['data']:
for mt in metrics:
try:
if count_all:
stats[vt][mt] += data['num_true_correct']['data'][vt]['sum_count'][mt]
elif count_sv:
stats[vt][mt] += data['num_true_correct']['data'][vt]['svSumCount'][mt]
else:
stats[vt][mt] += data['num_true_correct']['data'][vt]['sum_count'][mt]
stats[vt][mt] -= data['num_true_correct']['data'][vt]['svSumCount'][mt]
except KeyError as err:
print ("error in {}. No {} field".format(jsonfile, err))
stats[vt][mt] += 0
def summarize_results(prefix, tp, fn, fp, t, var_types, sv_length = 100, regions = None, bed_either = False, java = 'java', bin_breaks = None,
ignore_ins_len = False):
'''
count variants by type and tabulate
:param tp:
:param fn:
:param fp:
:param t:
:return:
'''
cmd = [java, utils.JAVA_XMX, '-jar', utils.VARSIMJAR, 'vcfcompareresultsparser',
'-prefix', prefix, '-tp',tp,
'-fn', fn, '-fp', fp,
'-t', t,
'-sv_length', str(sv_length),
]
if regions:
cmd = cmd + ['-bed', regions]
if bed_either:
cmd = cmd + ['-bed_either']
if bin_breaks:
cmd = cmd + ['-bin_breaks', bin_breaks]
if ignore_ins_len:
cmd = cmd + ['-ignore_ins_len']
utils.run_shell_command(cmd, cmd_stdout=sys.stdout, cmd_stderr=sys.stderr)
tp = prefix + "_tp.vcf"
fn = prefix + "_fn.vcf"
fp = prefix + "_fp.vcf"
t = prefix + "_t.vcf"
tp = utils.sort_and_compress(tp)
fn = utils.sort_and_compress(fn)
fp = utils.sort_and_compress(fp)
t = utils.sort_and_compress(t)
jsonfile = "{0}_report.json".format(prefix)
metrics = ['tp', 'fp', 't', 'fn']
stats = {k: {ii: 0 for ii in metrics} for k in var_types}
parse_jsons(jsonfile, stats)
print("Non-SV stats")
print_stats(stats)
sv_stats = {k: {ii: 0 for ii in metrics} for k in var_types}
parse_jsons(jsonfile, sv_stats, count_sv=True)
print("SV stats")
print_stats(sv_stats)
all_stats = {k: {ii: 0 for ii in metrics} for k in var_types}
parse_jsons(jsonfile, all_stats, count_all=True)
print("Overall stats")
print_stats(all_stats)
return tp, fn, fp, t
if __name__ == "__main__":
main_parser = argparse.ArgumentParser(description="VarSim: A high-fidelity simulation validation framework",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
main_parser.add_argument("--reference", metavar="FASTA", help="reference filename", required=True, type=str)
main_parser.add_argument("--sdf", metavar="SDF", help="SDF formatted reference folder", required=False, type=str, default='')
main_parser.add_argument("--out_dir", metavar="OUTDIR", help="output folder", required=True, type=str)
main_parser.add_argument("--vcfs", metavar="VCF", help="variant calls to be evaluated", nargs="+", default=[], required = True)
main_parser.add_argument("--var_types", metavar="VARTYPE", help="variant types", nargs="+",
default=['SNP','Insertion','Complex','Deletion'],
choices = ['SNP', 'Deletion', 'Insertion', 'Inversion', 'TandemDup',
'Complex', 'TransDup', 'TansDel', 'InterDup', 'Translocation'], required = False)
main_parser.add_argument("--true_vcf", metavar="VCF", help="Input small variant sampling VCF, usually dbSNP", required = True)
main_parser.add_argument("--master_vcf", metavar="MASTER_VCF", help="Master whitelist, if applicable", required = False)
main_parser.add_argument("--call_vcf", metavar="CALL_VCF", help="Original, VCF output by variant caller, if applicable", required = False)
main_parser.add_argument("--regions", help="BED file to restrict analysis [Optional]", required = False, type=str)
main_parser.add_argument("--sample", metavar = "SAMPLE", help="sample name", required = False, type=str)
main_parser.add_argument("--exclude_filtered", action = 'store_true', help="only consider variants with PASS or . in FILTER column", required = False)
main_parser.add_argument("--disallow_partial_fp", action = 'store_true', help="For a partially-matched false negative variant, output all matching variants as false positive", required = False)
main_parser.add_argument("--match_geno", action = 'store_true', help="compare genotype in addition to alleles", required = False)
main_parser.add_argument("--sv_length", type = int, help="length cutoff for SV (only effective for counting, not comparison). For comparison, please add -sv_length to --vcfcompare_options.", required = False, default = 100)
main_parser.add_argument("--ignore_ins_len", action = 'store_true', help="Ignore length of insertion (treat it as 0 length), otherwise ignore insertions and other SVs without proper SVLEN.", required = False)
main_parser.add_argument('--version', action='version', version=utils.get_version())
main_parser.add_argument("--log_to_file", metavar="LOGFILE", help="logfile. If not specified, log to stderr", required=False, type=str, default="")
main_parser.add_argument("--loglevel", help="Set logging level", choices=["debug", "warn", "info"], default="info")
main_parser.add_argument("--vcfcompare_options", metavar="OPT", help="additional options for VarSim vcfcompare", default="", type = str)
main_parser.add_argument("--vcfeval_options", metavar="OPT", help="additional options for RTG vcfeval", default="", type = str)
main_parser.add_argument("--bed_either", action = 'store_true', help="Use either break-end of the variant for filtering instead of both")
main_parser.add_argument("--java_max_mem", metavar="XMX", help="max java memory", default="10g", type = str)
main_parser.add_argument("--java", metavar="PATH", help="path to java", default="java", type = str)
main_parser.add_argument("--bin_breaks", metavar="INPUT_STR", help="user defined bin breaks", required = False, type = str)
args = main_parser.parse_args()
process(args)
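
# Example invocation (a sketch with hypothetical file names, not part of the
# original script):
#   python varsim_compare.py --reference ref.fa --out_dir results \
#       --vcfs calls.vcf --true_vcf truth.vcf --sv_length 100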
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""File and file-path manipulation utilities.
:group path manipulation: first_level_directory, relative_path, is_binary,\
get_by_ext, remove_dead_links
:group file manipulation: norm_read, norm_open, lines, stream_lines, lines,\
write_open_mode, ensure_fs_mode, export
:sort: path manipulation, file manipulation
"""
from __future__ import print_function
__docformat__ = "restructuredtext en"
import sys
import shutil
import mimetypes
from os.path import isabs, isdir, islink, split, exists, normpath, join
from os.path import abspath
from os import sep, mkdir, remove, listdir, stat, chmod, walk
from stat import ST_MODE, S_IWRITE
from logilab.common import STD_BLACKLIST as BASE_BLACKLIST, IGNORED_EXTENSIONS
from logilab.common.shellutils import find
from logilab.common.deprecation import deprecated
from logilab.common.compat import FileIO
def first_level_directory(path):
"""Return the first level directory of a path.
>>> first_level_directory('home/syt/work')
'home'
>>> first_level_directory('/home/syt/work')
'/'
>>> first_level_directory('work')
'work'
>>>
:type path: str
:param path: the path for which we want the first level directory
:rtype: str
:return: the first level directory appearing in `path`
"""
head, tail = split(path)
while head and tail:
head, tail = split(head)
if tail:
return tail
# path was absolute, head is the fs root
return head
def abspath_listdir(path):
"""Lists path's content using absolute paths."""
path = abspath(path)
return [join(path, filename) for filename in listdir(path)]
def is_binary(filename):
    """Return true if filename may be a binary file, according to its
    extension.
    :type filename: str
    :param filename: the name of the file
    :rtype: bool
    :return:
        true if the file is a binary file (actually if its MIME type
        doesn't begin with text/)
    """
    try:
        return not mimetypes.guess_type(filename)[0].startswith('text')
    except AttributeError:
        # guess_type returned (None, ...): unknown type, assume binary
        return True
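
# Quick illustration (results depend on the local MIME database, so treat the
# values as typical rather than guaranteed):
#   >>> is_binary('notes.txt')
#   False
#   >>> is_binary('photo.png')
#   True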
def write_open_mode(filename):
"""Return the write mode that should used to open file.
:type filename: str
:param filename: the name of the file
:rtype: str
    :return: the mode that should be used to open the file ('w' or 'wb')
"""
if is_binary(filename):
return 'wb'
return 'w'
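
# Typical pairing with open() (illustrative sketch):
#   mode = write_open_mode(filename)   # 'wb' for binary types, else 'w'
#   stream = open(filename, mode)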
def ensure_fs_mode(filepath, desired_mode=S_IWRITE):
"""Check that the given file has the given mode(s) set, else try to
set it.
:type filepath: str
:param filepath: path of the file
:type desired_mode: int
:param desired_mode:
        ORed flags describing the desired mode. Use constants from the
        `stat` module for file permission modes
"""
mode = stat(filepath)[ST_MODE]
if not mode & desired_mode:
chmod(filepath, mode | desired_mode)
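
# Example (sketch, hypothetical path): make a generated file writable before
# rewriting it in place; S_IWRITE is imported from `stat` above.
#   ensure_fs_mode('generated/output.txt', S_IWRITE)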
# XXX (syt) unused? kill?
class ProtectedFile(FileIO):
"""A special file-object class that automatically does a 'chmod +w' when
needed.
XXX: for now, the way it is done allows 'normal file-objects' to be
created during the ProtectedFile object lifetime.
One way to circumvent this would be to chmod / unchmod on each
write operation.
One other way would be to :
- catch the IOError in the __init__
- if IOError, then create a StringIO object
- each write operation writes in this StringIO object
- on close()/del(), write/append the StringIO content to the file and
do the chmod only once
"""
def __init__(self, filepath, mode):
self.original_mode = stat(filepath)[ST_MODE]
self.mode_changed = False
if mode in ('w', 'a', 'wb', 'ab'):
if not self.original_mode & S_IWRITE:
chmod(filepath, self.original_mode | S_IWRITE)
self.mode_changed = True
FileIO.__init__(self, filepath, mode)
def _restore_mode(self):
"""restores the original mode if needed"""
if self.mode_changed:
chmod(self.name, self.original_mode)
# Don't re-chmod in case of several restore
self.mode_changed = False
def close(self):
"""restore mode before closing"""
self._restore_mode()
FileIO.close(self)
def __del__(self):
if not self.closed:
self.close()
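
# Usage sketch (hypothetical path): transparently write through a read-only
# file; the original mode is restored on close().
#   f = ProtectedFile('locked/output.txt', 'w')
#   f.write('data')
#   f.close()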
class UnresolvableError(Exception):
"""Exception raised by relative path when it's unable to compute relative
path between two paths.
"""
def relative_path(from_file, to_file):
"""Try to get a relative path from `from_file` to `to_file`
(path will be absolute if to_file is an absolute file). This function
is useful to create link in `from_file` to `to_file`. This typical use
case is used in this function description.
If both files are relative, they're expected to be relative to the same
directory.
>>> relative_path( from_file='toto/index.html', to_file='index.html')
'../index.html'
>>> relative_path( from_file='index.html', to_file='toto/index.html')
'toto/index.html'
>>> relative_path( from_file='tutu/index.html', to_file='toto/index.html')
'../toto/index.html'
>>> relative_path( from_file='toto/index.html', to_file='/index.html')
'/index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/index.html')
'../index.html'
>>> relative_path( from_file='/toto/index.html', to_file='/toto/summary.html')
'summary.html'
>>> relative_path( from_file='index.html', to_file='index.html')
''
>>> relative_path( from_file='/index.html', to_file='toto/index.html')
Traceback (most recent call last):
File "<string>", line 1, in ?
File "<stdin>", line 37, in relative_path
UnresolvableError
>>> relative_path( from_file='/index.html', to_file='/index.html')
''
>>>
:type from_file: str
:param from_file: source file (where links will be inserted)
:type to_file: str
:param to_file: target file (on which links point)
:raise UnresolvableError: if it has been unable to guess a correct path
:rtype: str
:return: the relative path of `to_file` from `from_file`
"""
from_file = normpath(from_file)
to_file = normpath(to_file)
if from_file == to_file:
return ''
if isabs(to_file):
if not isabs(from_file):
return to_file
elif isabs(from_file):
raise UnresolvableError()
from_parts = from_file.split(sep)
to_parts = to_file.split(sep)
idem = 1
result = []
while len(from_parts) > 1:
dirname = from_parts.pop(0)
if idem and len(to_parts) > 1 and dirname == to_parts[0]:
to_parts.pop(0)
else:
idem = 0
result.append('..')
result += to_parts
return sep.join(result)
def norm_read(path):
"""Return the content of the file with normalized line feeds.
:type path: str
:param path: path to the file to read
:rtype: str
:return: the content of the file with normalized line feeds
"""
return open(path, 'U').read()
norm_read = deprecated("use \"open(path, 'U').read()\"")(norm_read)
def norm_open(path):
"""Return a stream for a file with content with normalized line feeds.
:type path: str
:param path: path to the file to open
:rtype: file or StringIO
:return: the opened file with normalized line feeds
"""
return open(path, 'U')
norm_open = deprecated("use \"open(path, 'U')\"")(norm_open)
def lines(path, comments=None):
"""Return a list of non empty lines in the file located at `path`.
:type path: str
:param path: path to the file
:type comments: str or None
:param comments:
optional string which can be used to comment a line in the file
(i.e. lines starting with this string won't be returned)
:rtype: list
    :return:
        a list of stripped lines from the file, without empty and commented
        lines
:warning: at some point this function will probably return an iterator
"""
stream = open(path, 'U')
result = stream_lines(stream, comments)
stream.close()
return result
def stream_lines(stream, comments=None):
"""Return a list of non empty lines in the given `stream`.
:type stream: object implementing 'xreadlines' or 'readlines'
:param stream: file like object
:type comments: str or None
:param comments:
optional string which can be used to comment a line in the file
(i.e. lines starting with this string won't be returned)
:rtype: list
    :return:
        a list of stripped lines from the stream, without empty and commented
        lines
:warning: at some point this function will probably return an iterator
"""
try:
readlines = stream.xreadlines
except AttributeError:
readlines = stream.readlines
result = []
for line in readlines():
line = line.strip()
if line and (comments is None or not line.startswith(comments)):
result.append(line)
return result
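
# Minimal sketch of stream_lines() behaviour, using only the standard library
# (StringIO.StringIO here since this module targets Python 2; io.StringIO on
# Python 3):
#   >>> from StringIO import StringIO
#   >>> stream_lines(StringIO('a\n# note\n\n b \n'), comments='#')
#   ['a', 'b']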
def export(from_dir, to_dir,
blacklist=BASE_BLACKLIST, ignore_ext=IGNORED_EXTENSIONS,
verbose=0):
"""Make a mirror of `from_dir` in `to_dir`, omitting directories and
files listed in the black list or ending with one of the given
extensions.
:type from_dir: str
:param from_dir: directory to export
:type to_dir: str
:param to_dir: destination directory
:type blacklist: list or tuple
:param blacklist:
list of files or directories to ignore, default to the content of
`BASE_BLACKLIST`
:type ignore_ext: list or tuple
:param ignore_ext:
list of extensions to ignore, default to the content of
`IGNORED_EXTENSIONS`
:type verbose: bool
:param verbose:
flag indicating whether information about exported files should be
printed to stderr, default to False
"""
try:
mkdir(to_dir)
except OSError:
pass # FIXME we should use "exists" if the point is about existing dir
# else (permission problems?) shouldn't return / raise ?
for directory, dirnames, filenames in walk(from_dir):
for norecurs in blacklist:
try:
dirnames.remove(norecurs)
except ValueError:
continue
for dirname in dirnames:
src = join(directory, dirname)
dest = to_dir + src[len(from_dir):]
if isdir(src):
if not exists(dest):
mkdir(dest)
for filename in filenames:
# don't include binary files
# endswith does not accept tuple in 2.4
if any([filename.endswith(ext) for ext in ignore_ext]):
continue
src = join(directory, filename)
dest = to_dir + src[len(from_dir):]
if verbose:
print(src, '->', dest, file=sys.stderr)
if exists(dest):
remove(dest)
shutil.copy2(src, dest)
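
# Example (sketch, hypothetical paths): mirror a source tree, skipping the
# blacklisted directories and ignored extensions configured above:
#   export('src/mypkg', 'build/mypkg', verbose=1)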
def remove_dead_links(directory, verbose=0):
"""Recursively traverse directory and remove all dead links.
:type directory: str
:param directory: directory to cleanup
:type verbose: bool
:param verbose:
flag indicating whether information about deleted links should be
printed to stderr, default to False
"""
    for dirpath, dirnames, filenames in walk(directory):
        for filename in dirnames + filenames:
src = join(dirpath, filename)
if islink(src) and not exists(src):
                if verbose:
                    print('remove dead link', src, file=sys.stderr)
                remove(src)
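
# Example (sketch, hypothetical path): prune symlinks whose targets are gone:
#   remove_dead_links('/srv/mirror', verbose=1)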
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Fixed-Point parameter type testcases - FP8_Q7.0
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
FP8_Q7.0 :
- size = 8 bits
    - 7 integer bits, 0 fractional bits
- range : [-128, 127]
Test cases :
------------
- FP8_Q7.0 parameter min value = -128
- FP8_Q7.0 parameter min value out of bounds = -128.1
- FP8_Q7.0 parameter max value = 127
- FP8_Q7.0 parameter max value out of bounds = 127.1
- FP8_Q7.0 parameter in nominal case = 64
"""
import commands  # Python 2 only; the subprocess module replaces it on Python 3
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log = ACTLogging.Logger()
# Test of type FP8_Q7.0 - range [-128,127]
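# Encoding reminder for the checks below: with 0 fractional bits the stored
# byte is simply the signed two's-complement integer, e.g.
#   64 -> 0x40, 127 -> 0x7f, -128 -> 0x80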
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_DIR/FP8_Q7.0"
self.pfw.sendCmd("setTuningMode", "on")
self.type_name = "FP8_Q7.0"
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
"""
Testing FP8_Q7.0 in nominal case = 64
-------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP8_Q7.0 parameter in nominal case = 64
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- FP8_Q7.0 parameter set to 64
- Blackboard and filesystem values checked
"""
log.D(self.test_Nominal_Case.__doc__)
value = "64"
hex_value = "0x40"
log.I("Setting %s to value %s" % (self.type_name, value))
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        assert err is None, log.E("when getting parameter %s : %s"
                                  % (self.param_name, err))
assert float(out) == float(value), log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/FP8_Q7.0') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin(self):
"""
Testing FP8_Q7.0 minimal value = -128
-------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP8_Q7.0 parameter min value = -128
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- FP8_Q7.0 parameter set to -128
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin.__doc__)
value = "-128"
hex_value = "0x80"
log.I("Setting %s to value %s" % (self.type_name, value))
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        assert err is None, log.E("when getting parameter %s : %s"
                                  % (self.param_name, err))
assert float(out) == float(value), log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/FP8_Q7.0') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin_Overflow(self):
"""
Testing FP8_Q7.0 parameter value out of negative range
------------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP8_Q7.0 to -128.1
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- FP8_Q7.0 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin_Overflow.__doc__)
value = "-128.1"
param_check = commands.getoutput('cat $PFW_RESULT/FP8_Q7.0')
log.I("Setting %s to value %s" % (self.type_name, value))
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
        assert commands.getoutput('cat $PFW_RESULT/FP8_Q7.0') == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
def test_TypeMax(self):
"""
Testing FP8_Q7.0 parameter maximum value
----------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP8_Q7.0 to 127
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- FP8_Q7.0 parameter set to 127
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax.__doc__)
value = "127"
hex_value = "0x7f"
log.I("Setting %s to value %s" % (self.type_name, value))
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
        assert err is None, log.E("when getting parameter %s : %s"
                                  % (self.param_name, err))
assert float(out) == float(value), log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/FP8_Q7.0') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMax_Overflow(self):
"""
Testing FP8_Q7.0 parameter value out of positive range
------------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set FP8_Q7.0 to 127.1
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- FP8_Q7.0 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax_Overflow.__doc__)
value = "127.1"
param_check = commands.getoutput('cat $PFW_RESULT/FP8_Q7.0')
log.I("Setting %s to value %s" % (self.type_name, value))
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
        assert err is None, log.E("when setting parameter %s : %s"
                                  % (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
        assert commands.getoutput('cat $PFW_RESULT/FP8_Q7.0') == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/route-selection-options/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information for the route selection options
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__always_compare_med",
"__ignore_as_path_length",
"__external_compare_router_id",
"__advertise_inactive_routes",
"__enable_aigp",
"__ignore_next_hop_igp_metric",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__always_compare_med = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="always-compare-med",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__ignore_as_path_length = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-as-path-length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__external_compare_router_id = YANGDynClass(
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="external-compare-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__advertise_inactive_routes = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="advertise-inactive-routes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__enable_aigp = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enable-aigp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__ignore_next_hop_igp_metric = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-next-hop-igp-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"afi-safis",
"afi-safi",
"route-selection-options",
"state",
]
def _get_always_compare_med(self):
"""
Getter method for always_compare_med, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/always_compare_med (boolean)
YANG Description: Compare multi-exit discriminator (MED) value from
different ASes when selecting the best route. The
default behavior is to only compare MEDs for paths
received from the same AS.
"""
return self.__always_compare_med
def _set_always_compare_med(self, v, load=False):
"""
Setter method for always_compare_med, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/always_compare_med (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_always_compare_med is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_always_compare_med() directly.
YANG Description: Compare multi-exit discriminator (MED) value from
different ASes when selecting the best route. The
default behavior is to only compare MEDs for paths
received from the same AS.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="always-compare-med",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """always_compare_med must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="always-compare-med", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__always_compare_med = t
if hasattr(self, "_set"):
self._set()
def _unset_always_compare_med(self):
self.__always_compare_med = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="always-compare-med",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_ignore_as_path_length(self):
"""
Getter method for ignore_as_path_length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/ignore_as_path_length (boolean)
YANG Description: Ignore the AS path length when selecting the best path.
The default is to use the AS path length and prefer paths
with shorter length.
"""
return self.__ignore_as_path_length
def _set_ignore_as_path_length(self, v, load=False):
"""
Setter method for ignore_as_path_length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/ignore_as_path_length (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_ignore_as_path_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ignore_as_path_length() directly.
YANG Description: Ignore the AS path length when selecting the best path.
The default is to use the AS path length and prefer paths
with shorter length.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-as-path-length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """ignore_as_path_length must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="ignore-as-path-length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__ignore_as_path_length = t
if hasattr(self, "_set"):
self._set()
def _unset_ignore_as_path_length(self):
self.__ignore_as_path_length = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-as-path-length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_external_compare_router_id(self):
"""
Getter method for external_compare_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/external_compare_router_id (boolean)
YANG Description: When comparing similar routes received from external
BGP peers, use the router-id as a criterion to select
the active path.
"""
return self.__external_compare_router_id
def _set_external_compare_router_id(self, v, load=False):
"""
Setter method for external_compare_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/external_compare_router_id (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_external_compare_router_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_external_compare_router_id() directly.
YANG Description: When comparing similar routes received from external
BGP peers, use the router-id as a criterion to select
the active path.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="external-compare-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """external_compare_router_id must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="external-compare-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__external_compare_router_id = t
if hasattr(self, "_set"):
self._set()
def _unset_external_compare_router_id(self):
self.__external_compare_router_id = YANGDynClass(
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="external-compare-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_advertise_inactive_routes(self):
"""
Getter method for advertise_inactive_routes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/advertise_inactive_routes (boolean)
YANG Description: Advertise inactive routes to external peers. The
default is to only advertise active routes.
"""
return self.__advertise_inactive_routes
def _set_advertise_inactive_routes(self, v, load=False):
"""
Setter method for advertise_inactive_routes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/advertise_inactive_routes (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_advertise_inactive_routes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_advertise_inactive_routes() directly.
YANG Description: Advertise inactive routes to external peers. The
default is to only advertise active routes.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="advertise-inactive-routes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """advertise_inactive_routes must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="advertise-inactive-routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__advertise_inactive_routes = t
if hasattr(self, "_set"):
self._set()
def _unset_advertise_inactive_routes(self):
self.__advertise_inactive_routes = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="advertise-inactive-routes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_enable_aigp(self):
"""
Getter method for enable_aigp, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/enable_aigp (boolean)
YANG Description: Flag to enable sending / receiving accumulated IGP
attribute in routing updates
"""
return self.__enable_aigp
def _set_enable_aigp(self, v, load=False):
"""
Setter method for enable_aigp, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/enable_aigp (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enable_aigp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enable_aigp() directly.
YANG Description: Flag to enable sending / receiving accumulated IGP
attribute in routing updates
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enable-aigp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enable_aigp must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-aigp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enable_aigp = t
if hasattr(self, "_set"):
self._set()
def _unset_enable_aigp(self):
self.__enable_aigp = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enable-aigp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_ignore_next_hop_igp_metric(self):
"""
Getter method for ignore_next_hop_igp_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/ignore_next_hop_igp_metric (boolean)
YANG Description: Ignore the IGP metric to the next-hop when calculating
BGP best-path. The default is to select the route for
which the metric to the next-hop is lowest
"""
return self.__ignore_next_hop_igp_metric
def _set_ignore_next_hop_igp_metric(self, v, load=False):
"""
Setter method for ignore_next_hop_igp_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/ignore_next_hop_igp_metric (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_ignore_next_hop_igp_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ignore_next_hop_igp_metric() directly.
YANG Description: Ignore the IGP metric to the next-hop when calculating
BGP best-path. The default is to select the route for
which the metric to the next-hop is lowest
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-next-hop-igp-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """ignore_next_hop_igp_metric must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="ignore-next-hop-igp-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__ignore_next_hop_igp_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_ignore_next_hop_igp_metric(self):
self.__ignore_next_hop_igp_metric = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-next-hop-igp-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
always_compare_med = __builtin__.property(_get_always_compare_med)
ignore_as_path_length = __builtin__.property(_get_ignore_as_path_length)
external_compare_router_id = __builtin__.property(_get_external_compare_router_id)
advertise_inactive_routes = __builtin__.property(_get_advertise_inactive_routes)
enable_aigp = __builtin__.property(_get_enable_aigp)
ignore_next_hop_igp_metric = __builtin__.property(_get_ignore_next_hop_igp_metric)
_pyangbind_elements = OrderedDict(
[
("always_compare_med", always_compare_med),
("ignore_as_path_length", ignore_as_path_length),
("external_compare_router_id", external_compare_router_id),
("advertise_inactive_routes", advertise_inactive_routes),
("enable_aigp", enable_aigp),
("ignore_next_hop_igp_metric", ignore_next_hop_igp_metric),
]
)
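
# Usage sketch (assumes pyangbind is installed; these leaves model operational
# state, is_config=False, so normal callers read them via the generated
# properties while backends populate them through the private setters):
#   s = state()
#   s.always_compare_med        # YANGBool default, i.e. false
#   s._set_enable_aigp(True)    # how a backend would record the oper value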
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/afi-safis/afi-safi/route-selection-options/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State information for the route selection options
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__always_compare_med",
"__ignore_as_path_length",
"__external_compare_router_id",
"__advertise_inactive_routes",
"__enable_aigp",
"__ignore_next_hop_igp_metric",
)
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__always_compare_med = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="always-compare-med",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__ignore_as_path_length = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-as-path-length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__external_compare_router_id = YANGDynClass(
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="external-compare-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__advertise_inactive_routes = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="advertise-inactive-routes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__enable_aigp = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enable-aigp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
self.__ignore_next_hop_igp_metric = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-next-hop-igp-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"afi-safis",
"afi-safi",
"route-selection-options",
"state",
]
def _get_always_compare_med(self):
"""
Getter method for always_compare_med, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/always_compare_med (boolean)
YANG Description: Compare multi-exit discriminator (MED) value from
different ASes when selecting the best route. The
default behavior is to only compare MEDs for paths
received from the same AS.
"""
return self.__always_compare_med
def _set_always_compare_med(self, v, load=False):
"""
Setter method for always_compare_med, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/always_compare_med (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_always_compare_med is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_always_compare_med() directly.
YANG Description: Compare multi-exit discriminator (MED) value from
different ASes when selecting the best route. The
default behavior is to only compare MEDs for paths
received from the same AS.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="always-compare-med",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """always_compare_med must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="always-compare-med", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__always_compare_med = t
if hasattr(self, "_set"):
self._set()
def _unset_always_compare_med(self):
self.__always_compare_med = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="always-compare-med",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_ignore_as_path_length(self):
"""
Getter method for ignore_as_path_length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/ignore_as_path_length (boolean)
YANG Description: Ignore the AS path length when selecting the best path.
The default is to use the AS path length and prefer paths
with shorter length.
"""
return self.__ignore_as_path_length
def _set_ignore_as_path_length(self, v, load=False):
"""
Setter method for ignore_as_path_length, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/ignore_as_path_length (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_ignore_as_path_length is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ignore_as_path_length() directly.
YANG Description: Ignore the AS path length when selecting the best path.
The default is to use the AS path length and prefer paths
with shorter length.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-as-path-length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """ignore_as_path_length must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="ignore-as-path-length", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__ignore_as_path_length = t
if hasattr(self, "_set"):
self._set()
def _unset_ignore_as_path_length(self):
self.__ignore_as_path_length = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-as-path-length",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_external_compare_router_id(self):
"""
Getter method for external_compare_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/external_compare_router_id (boolean)
YANG Description: When comparing similar routes received from external
BGP peers, use the router-id as a criterion to select
the active path.
"""
return self.__external_compare_router_id
def _set_external_compare_router_id(self, v, load=False):
"""
Setter method for external_compare_router_id, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/external_compare_router_id (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_external_compare_router_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_external_compare_router_id() directly.
YANG Description: When comparing similar routes received from external
BGP peers, use the router-id as a criterion to select
the active path.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="external-compare-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """external_compare_router_id must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("true"), is_leaf=True, yang_name="external-compare-router-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__external_compare_router_id = t
if hasattr(self, "_set"):
self._set()
def _unset_external_compare_router_id(self):
self.__external_compare_router_id = YANGDynClass(
base=YANGBool,
default=YANGBool("true"),
is_leaf=True,
yang_name="external-compare-router-id",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_advertise_inactive_routes(self):
"""
Getter method for advertise_inactive_routes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/advertise_inactive_routes (boolean)
YANG Description: Advertise inactive routes to external peers. The
default is to only advertise active routes.
"""
return self.__advertise_inactive_routes
def _set_advertise_inactive_routes(self, v, load=False):
"""
Setter method for advertise_inactive_routes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/advertise_inactive_routes (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_advertise_inactive_routes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_advertise_inactive_routes() directly.
YANG Description: Advertise inactive routes to external peers. The
default is to only advertise active routes.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="advertise-inactive-routes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """advertise_inactive_routes must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="advertise-inactive-routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__advertise_inactive_routes = t
if hasattr(self, "_set"):
self._set()
def _unset_advertise_inactive_routes(self):
self.__advertise_inactive_routes = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="advertise-inactive-routes",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_enable_aigp(self):
"""
Getter method for enable_aigp, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/enable_aigp (boolean)
YANG Description: Flag to enable sending / receiving accumulated IGP
attribute in routing updates
"""
return self.__enable_aigp
def _set_enable_aigp(self, v, load=False):
"""
Setter method for enable_aigp, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/enable_aigp (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enable_aigp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enable_aigp() directly.
YANG Description: Flag to enable sending / receiving accumulated IGP
attribute in routing updates
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enable-aigp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enable_aigp must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enable-aigp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__enable_aigp = t
if hasattr(self, "_set"):
self._set()
def _unset_enable_aigp(self):
self.__enable_aigp = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enable-aigp",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
def _get_ignore_next_hop_igp_metric(self):
"""
Getter method for ignore_next_hop_igp_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/ignore_next_hop_igp_metric (boolean)
YANG Description: Ignore the IGP metric to the next-hop when calculating
BGP best-path. The default is to select the route for
which the metric to the next-hop is lowest
"""
return self.__ignore_next_hop_igp_metric
def _set_ignore_next_hop_igp_metric(self, v, load=False):
"""
Setter method for ignore_next_hop_igp_metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/afi_safis/afi_safi/route_selection_options/state/ignore_next_hop_igp_metric (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_ignore_next_hop_igp_metric is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ignore_next_hop_igp_metric() directly.
YANG Description: Ignore the IGP metric to the next-hop when calculating
BGP best-path. The default is to select the route for
which the metric to the next-hop is lowest
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-next-hop-igp-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """ignore_next_hop_igp_metric must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="ignore-next-hop-igp-metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
}
)
self.__ignore_next_hop_igp_metric = t
if hasattr(self, "_set"):
self._set()
def _unset_ignore_next_hop_igp_metric(self):
self.__ignore_next_hop_igp_metric = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="ignore-next-hop-igp-metric",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
always_compare_med = __builtin__.property(_get_always_compare_med)
ignore_as_path_length = __builtin__.property(_get_ignore_as_path_length)
external_compare_router_id = __builtin__.property(_get_external_compare_router_id)
advertise_inactive_routes = __builtin__.property(_get_advertise_inactive_routes)
enable_aigp = __builtin__.property(_get_enable_aigp)
ignore_next_hop_igp_metric = __builtin__.property(_get_ignore_next_hop_igp_metric)
_pyangbind_elements = OrderedDict(
[
("always_compare_med", always_compare_med),
("ignore_as_path_length", ignore_as_path_length),
("external_compare_router_id", external_compare_router_id),
("advertise_inactive_routes", advertise_inactive_routes),
("enable_aigp", enable_aigp),
("ignore_next_hop_igp_metric", ignore_next_hop_igp_metric),
]
)
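# Illustrative usage sketch (not part of the generated module): backends
# populate these read-only (config: false) leaves through the private
# setters, as the docstrings above describe. `state` is assumed to be an
# instance of this generated container class.
#
#   state._set_advertise_inactive_routes(YANGBool("true"))
#   state.advertise_inactive_routes            # truthy YANGBool
#   state._unset_advertise_inactive_routes()   # back to the default "false"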
| |
from __future__ import absolute_import
import time
from types import TracebackType
from decimal import Decimal
import pytest
from tornado import gen, ioloop
from concurrent.futures import Future, as_completed, TimeoutError
from threadloop import ThreadLoop
from threadloop.exceptions import ThreadNotStartedError
@pytest.fixture(autouse=True)
def clear_io_loop():
# clear the current IOLoop before each test (fixture is autouse)
ioloop.IOLoop.clear_current()
@pytest.yield_fixture
def threadloop():
with ThreadLoop() as threadloop:
yield threadloop
class TestException(Exception):
pass
def test_coroutine_returns_future(threadloop):
@gen.coroutine
def coroutine():
raise gen.Return("Hello World")
future = threadloop.submit(coroutine)
assert isinstance(future, Future), "expected a concurrent.futures.Future"
assert future.result() == "Hello World"
def test_propagates_arguments(threadloop):
@gen.coroutine
def coroutine(message, adjective="Shady"):
raise gen.Return("Hello %s %s" % (adjective, message))
future = threadloop.submit(coroutine, "World")
assert future.result() == "Hello Shady World"
future = threadloop.submit(coroutine, "World", adjective="Cloudy")
assert future.result() == "Hello Cloudy World"
def test_coroutine_exception_propagates(threadloop):
@gen.coroutine
def coroutine():
raise TestException()
with pytest.raises(TestException):
future = threadloop.submit(coroutine)
future.result()
def test_coroutine_exception_contains_exc_info(threadloop):
@gen.coroutine
def coroutine():
raise TestException('something went wrong')
with pytest.raises(Exception) as exc_info:
threadloop.submit(coroutine).result()
assert 'something went wrong' in str(exc_info.value)
assert isinstance(exc_info.value, TestException)
assert isinstance(exc_info.tb, TracebackType)
assert (
"raise TestException('something went wrong')"
in str(exc_info.traceback[-1])
)
def test_propagate_concurrent_future_exception(threadloop):
def func():
future = Future()
future.set_exception(AttributeError())
return future
with pytest.raises(AttributeError):
threadloop.submit(func).result()
def test_plain_function(threadloop):
def not_a_coroutine():
return "Hello World"
future = threadloop.submit(not_a_coroutine)
assert isinstance(future, Future), "expected a concurrent.futures.Future"
assert future.result() == "Hello World"
def test_plain_function_exception_propagates(threadloop):
def not_a_coroutine():
raise TestException()
with pytest.raises(TestException):
future = threadloop.submit(not_a_coroutine)
future.result()
def test_plain_function_exception_contains_exc_info(threadloop):
def not_a_coroutine():
raise TestException('something went wrong')
with pytest.raises(Exception) as exc_info:
threadloop.submit(not_a_coroutine).result()
assert 'something went wrong' in str(exc_info.value)
assert isinstance(exc_info.value, TestException)
assert isinstance(exc_info.tb, TracebackType)
assert (
"raise TestException('something went wrong')"
in str(exc_info.traceback[-1])
)
def test_use_existing_ioloop():
io_loop = ioloop.IOLoop.current()
threadloop = ThreadLoop(io_loop)
assert threadloop._io_loop is io_loop
@gen.coroutine
def coroutine():
raise gen.Return("Hello World")
with threadloop:
future = threadloop.submit(coroutine)
assert future.result() == "Hello World"
def test_start_must_be_called_before_submit():
threadloop = ThreadLoop()
@gen.coroutine
def coroutine():
raise gen.Return("Hello World")
with pytest.raises(ThreadNotStartedError):
threadloop.submit(coroutine)
def test_submits_coroutines_concurrently(threadloop):
@gen.coroutine
def coroutine1():
yield gen.sleep(.1)
raise gen.Return('coroutine1')
@gen.coroutine
def coroutine2():
yield gen.sleep(.1)
raise gen.Return('coroutine2')
@gen.coroutine
def coroutine3():
yield gen.sleep(.1)
raise gen.Return('coroutine3')
start = time.time()
future1 = threadloop.submit(coroutine1)
future2 = threadloop.submit(coroutine2)
future3 = threadloop.submit(coroutine3)
result1 = future1.result()
result2 = future2.result()
result3 = future3.result()
end = time.time() - start
# round to float with precision of 1, eg 0.3
took = float(round(Decimal(str(end)), 1))
# should only take ~100ms to finish all three
# instead of ~300ms if they were executed serially
assert took == .1
assert result1 == 'coroutine1'
assert result2 == 'coroutine2'
assert result3 == 'coroutine3'
def test_as_completed(threadloop):
@gen.coroutine
def coroutine1():
yield gen.sleep(.02)
raise gen.Return('coroutine1')
@gen.coroutine
def coroutine2():
yield gen.sleep(.03)
raise gen.Return('coroutine2')
@gen.coroutine
def coroutine3():
yield gen.sleep(.01)
raise gen.Return('coroutine3')
@gen.coroutine
def coroutine4():
yield gen.sleep(.04)
raise gen.Return('coroutine4')
futures = []
futures.append(threadloop.submit(coroutine1))
futures.append(threadloop.submit(coroutine2))
futures.append(threadloop.submit(coroutine3))
futures.append(threadloop.submit(coroutine4))
i = 0
for future in as_completed(futures):
i = i + 1
# make sure futures finish in the expected order
if i == 1:
assert future.result() == "coroutine3"
elif i == 2:
assert future.result() == "coroutine1"
elif i == 3:
assert future.result() == "coroutine2"
elif i == 4:
assert future.result() == "coroutine4"
assert i == 4, "expected 4 completed futures"
def test_timeout(threadloop):
@gen.coroutine
def too_long():
yield gen.sleep(5) # 5 sec task
raise gen.Return('that was too long')
start = time.time()
future = threadloop.submit(too_long)
with pytest.raises(TimeoutError):
future.result(timeout=.001)
end = time.time() - start
took = float(round(Decimal(str(end)), 1))
assert took <= .002
def test_block_until_thread_is_ready():
threadloop = ThreadLoop()
assert not threadloop.is_ready()
threadloop.start()
assert threadloop.is_ready()
def test_is_not_ready_when_ready_hasnt_been_sent():
threadloop = ThreadLoop()
threadloop._thread = True # fake the Thread being set
assert not threadloop.is_ready()
def test_main_io_loop_is_not_changed():
threadloop = ThreadLoop()
threadloop.start()
# The ThreadLoop's IOLoop should not be the 'current' IOLoop in the main
# thread.
tl_loop = threadloop.submit(ioloop.IOLoop.current).result()
assert ioloop.IOLoop.current() is not tl_loop
def test_ioloop_is_not_already_running():
threadloop = ThreadLoop()
threadloop.start()
@gen.coroutine
def f():
yield threadloop.submit(gen.sleep, 0.1)
ioloop.IOLoop.current().run_sync(f)
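# Usage sketch (illustrative, based only on the API exercised by the tests
# above; `some_coroutine` is a placeholder name):
#
#   with ThreadLoop() as tl:
#       future = tl.submit(some_coroutine, "arg", kwarg="value")
#       print(future.result(timeout=1))  # concurrent.futures.Future semantics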
| |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/paraparser.py
__version__=''' $Id: paraparser.py 3774 2010-09-08 15:11:10Z rgbecker $ '''
__doc__='''The parser used to process markup within paragraphs'''
import string
import re
from types import TupleType, UnicodeType, StringType
import sys
import os
import copy
import base64
try:
import cPickle as pickle
except:
import pickle
import unicodedata
import reportlab.lib.sequencer
from reportlab.lib.abag import ABag
from reportlab.lib.utils import ImageReader
from reportlab.lib import xmllib
from reportlab.lib.colors import toColor, white, black, red, Color
from reportlab.lib.fonts import tt2ps, ps2tt
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
from reportlab.lib.units import inch,mm,cm,pica
_re_para = re.compile(r'^\s*<\s*para(?:\s+|>|/>)')
sizeDelta = 2 # amount to reduce font size by for super and sub script
subFraction = 0.5 # fraction of font size that a sub script should be lowered
superFraction = 0.5 # fraction of font size that a super script should be raised
DEFAULT_INDEX_NAME='_indexAdd'
def _convnum(s, unit=1, allowRelative=True):
if s[0] in ('+','-') and allowRelative:
try:
return ('relative',int(s)*unit)
except ValueError:
return ('relative',float(s)*unit)
else:
try:
return int(s)*unit
except ValueError:
return float(s)*unit
def _num(s, unit=1, allowRelative=True):
"""Convert a string like '10cm' to an int or float (in points).
The default unit is point, but optionally you can use other
default units like mm.
"""
if s.endswith('cm'):
unit=cm
s = s[:-2]
if s.endswith('in'):
unit=inch
s = s[:-2]
if s.endswith('pt'):
unit=1
s = s[:-2]
if s.endswith('i'):
unit=inch
s = s[:-1]
if s.endswith('mm'):
unit=mm
s = s[:-2]
if s.endswith('pica'):
unit=pica
s = s[:-4]
return _convnum(s,unit,allowRelative)
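# Examples (illustrative): _num('10cm') -> 10*cm points (~283.46),
# _num('1in') -> 72.0 points, and _num('+12') -> ('relative', 12).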
def _numpct(s,unit=1,allowRelative=False):
if s.endswith('%'):
return _PCT(_convnum(s[:-1],allowRelative=allowRelative))
else:
return _num(s,unit,allowRelative)
class _PCT:
def __init__(self,v):
self._value = v*0.01
def normalizedValue(self,normalizer):
normalizer = normalizer or getattr(self,'_normalizer')
return normalizer*self._value
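# Example (illustrative): _PCT(50).normalizedValue(200) == 100.0, i.e. 50%
# of whatever normalizer is supplied at layout time.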
def _valignpc(s):
s = s.lower()
if s in ('baseline','sub','super','top','text-top','middle','bottom','text-bottom'):
return s
if s.endswith('%'):
n = _convnum(s[:-1])
if isinstance(n,tuple):
n = n[1]
return _PCT(n)
n = _num(s)
if isinstance(n,tuple):
n = n[1]
return n
def _autoLeading(x):
x = x.lower()
if x in ('','min','max','off'):
return x
raise ValueError('Invalid autoLeading=%r' % x )
def _align(s):
s = string.lower(s)
if s=='left': return TA_LEFT
elif s=='right': return TA_RIGHT
elif s=='justify': return TA_JUSTIFY
elif s in ('centre','center'): return TA_CENTER
else: raise ValueError
_paraAttrMap = {'font': ('fontName', None),
'face': ('fontName', None),
'fontsize': ('fontSize', _num),
'size': ('fontSize', _num),
'leading': ('leading', _num),
'autoleading': ('autoLeading', _autoLeading),
'lindent': ('leftIndent', _num),
'rindent': ('rightIndent', _num),
'findent': ('firstLineIndent', _num),
'align': ('alignment', _align),
'spaceb': ('spaceBefore', _num),
'spacea': ('spaceAfter', _num),
'bfont': ('bulletFontName', None),
'bfontsize': ('bulletFontSize',_num),
'boffsety': ('bulletOffsetY',_num),
'bindent': ('bulletIndent',_num),
'bcolor': ('bulletColor',toColor),
'color':('textColor',toColor),
'backcolor':('backColor',toColor),
'bgcolor':('backColor',toColor),
'bg':('backColor',toColor),
'fg': ('textColor',toColor),
}
_bulletAttrMap = {
'font': ('bulletFontName', None),
'face': ('bulletFontName', None),
'size': ('bulletFontSize',_num),
'fontsize': ('bulletFontSize',_num),
'offsety': ('bulletOffsetY',_num),
'indent': ('bulletIndent',_num),
'color': ('bulletColor',toColor),
'fg': ('bulletColor',toColor),
}
#things which are valid font attributes
_fontAttrMap = {'size': ('fontSize', _num),
'face': ('fontName', None),
'name': ('fontName', None),
'fg': ('textColor', toColor),
'color':('textColor', toColor),
'backcolor':('backColor',toColor),
'bgcolor':('backColor',toColor),
}
#things which are valid font attributes
_linkAttrMap = {'size': ('fontSize', _num),
'face': ('fontName', None),
'name': ('fontName', None),
'fg': ('textColor', toColor),
'color':('textColor', toColor),
'backcolor':('backColor',toColor),
'bgcolor':('backColor',toColor),
'dest': ('link', None),
'destination': ('link', None),
'target': ('link', None),
'href': ('link', None),
}
_anchorAttrMap = {'fontSize': ('fontSize', _num),
'fontName': ('fontName', None),
'name': ('name', None),
'fg': ('textColor', toColor),
'color':('textColor', toColor),
'backcolor':('backColor',toColor),
'bgcolor':('backColor',toColor),
'href': ('href', None),
}
_imgAttrMap = {
'src': ('src', None),
'width': ('width',_numpct),
'height':('height',_numpct),
'valign':('valign',_valignpc),
}
_indexAttrMap = {
'name': ('name',None),
'item': ('item',None),
'offset': ('offset',None),
'format': ('format',None),
}
def _addAttributeNames(m):
K = m.keys()
for k in K:
n = m[k][0]
if n not in m: m[n] = m[k]
n = string.lower(n)
if n not in m: m[n] = m[k]
_addAttributeNames(_paraAttrMap)
_addAttributeNames(_fontAttrMap)
_addAttributeNames(_bulletAttrMap)
_addAttributeNames(_anchorAttrMap)
_addAttributeNames(_linkAttrMap)
def _applyAttributes(obj, attr):
for k, v in attr.items():
if type(v) is TupleType and v[0]=='relative':
#AR 20/5/2000 - remove 1.5.2-ism
#v = v[1]+getattr(obj,k,0)
if hasattr(obj, k):
v = v[1]+getattr(obj,k)
else:
v = v[1]
setattr(obj,k,v)
#Named character entities intended to be supported from the special font
#with additions suggested by Christoph Zwerschke who also suggested the
#numeric entity names that follow.
greeks = {
'Aacute': '\xc3\x81',
'aacute': '\xc3\xa1',
'Acirc': '\xc3\x82',
'acirc': '\xc3\xa2',
'acute': '\xc2\xb4',
'AElig': '\xc3\x86',
'aelig': '\xc3\xa6',
'Agrave': '\xc3\x80',
'agrave': '\xc3\xa0',
'alefsym': '\xe2\x84\xb5',
'Alpha': '\xce\x91',
'alpha': '\xce\xb1',
'and': '\xe2\x88\xa7',
'ang': '\xe2\x88\xa0',
'Aring': '\xc3\x85',
'aring': '\xc3\xa5',
'asymp': '\xe2\x89\x88',
'Atilde': '\xc3\x83',
'atilde': '\xc3\xa3',
'Auml': '\xc3\x84',
'auml': '\xc3\xa4',
'bdquo': '\xe2\x80\x9e',
'Beta': '\xce\x92',
'beta': '\xce\xb2',
'brvbar': '\xc2\xa6',
'bull': '\xe2\x80\xa2',
'cap': '\xe2\x88\xa9',
'Ccedil': '\xc3\x87',
'ccedil': '\xc3\xa7',
'cedil': '\xc2\xb8',
'cent': '\xc2\xa2',
'Chi': '\xce\xa7',
'chi': '\xcf\x87',
'circ': '\xcb\x86',
'clubs': '\xe2\x99\xa3',
'cong': '\xe2\x89\x85',
'copy': '\xc2\xa9',
'crarr': '\xe2\x86\xb5',
'cup': '\xe2\x88\xaa',
'curren': '\xc2\xa4',
'dagger': '\xe2\x80\xa0',
'Dagger': '\xe2\x80\xa1',
'darr': '\xe2\x86\x93',
'dArr': '\xe2\x87\x93',
'deg': '\xc2\xb0',
'delta': '\xce\xb4',
'Delta': '\xe2\x88\x86',
'diams': '\xe2\x99\xa6',
'divide': '\xc3\xb7',
'Eacute': '\xc3\x89',
'eacute': '\xc3\xa9',
'Ecirc': '\xc3\x8a',
'ecirc': '\xc3\xaa',
'Egrave': '\xc3\x88',
'egrave': '\xc3\xa8',
'empty': '\xe2\x88\x85',
'emsp': '\xe2\x80\x83',
'ensp': '\xe2\x80\x82',
'Epsilon': '\xce\x95',
'epsilon': '\xce\xb5',
'epsiv': '\xce\xb5',
'equiv': '\xe2\x89\xa1',
'Eta': '\xce\x97',
'eta': '\xce\xb7',
'ETH': '\xc3\x90',
'eth': '\xc3\xb0',
'Euml': '\xc3\x8b',
'euml': '\xc3\xab',
'euro': '\xe2\x82\xac',
'exist': '\xe2\x88\x83',
'fnof': '\xc6\x92',
'forall': '\xe2\x88\x80',
'frac12': '\xc2\xbd',
'frac14': '\xc2\xbc',
'frac34': '\xc2\xbe',
'frasl': '\xe2\x81\x84',
'Gamma': '\xce\x93',
'gamma': '\xce\xb3',
'ge': '\xe2\x89\xa5',
'harr': '\xe2\x86\x94',
'hArr': '\xe2\x87\x94',
'hearts': '\xe2\x99\xa5',
'hellip': '\xe2\x80\xa6',
'Iacute': '\xc3\x8d',
'iacute': '\xc3\xad',
'Icirc': '\xc3\x8e',
'icirc': '\xc3\xae',
'iexcl': '\xc2\xa1',
'Igrave': '\xc3\x8c',
'igrave': '\xc3\xac',
'image': '\xe2\x84\x91',
'infin': '\xe2\x88\x9e',
'int': '\xe2\x88\xab',
'Iota': '\xce\x99',
'iota': '\xce\xb9',
'iquest': '\xc2\xbf',
'isin': '\xe2\x88\x88',
'Iuml': '\xc3\x8f',
'iuml': '\xc3\xaf',
'Kappa': '\xce\x9a',
'kappa': '\xce\xba',
'Lambda': '\xce\x9b',
'lambda': '\xce\xbb',
'lang': '\xe2\x8c\xa9',
'laquo': '\xc2\xab',
'larr': '\xe2\x86\x90',
'lArr': '\xe2\x87\x90',
'lceil': '\xef\xa3\xae',
'ldquo': '\xe2\x80\x9c',
'le': '\xe2\x89\xa4',
'lfloor': '\xef\xa3\xb0',
'lowast': '\xe2\x88\x97',
'loz': '\xe2\x97\x8a',
'lrm': '\xe2\x80\x8e',
'lsaquo': '\xe2\x80\xb9',
'lsquo': '\xe2\x80\x98',
'macr': '\xc2\xaf',
'mdash': '\xe2\x80\x94',
'micro': '\xc2\xb5',
'middot': '\xc2\xb7',
'minus': '\xe2\x88\x92',
'mu': '\xc2\xb5',
'Mu': '\xce\x9c',
'nabla': '\xe2\x88\x87',
'nbsp': '\xc2\xa0',
'ndash': '\xe2\x80\x93',
'ne': '\xe2\x89\xa0',
'ni': '\xe2\x88\x8b',
'notin': '\xe2\x88\x89',
'not': '\xc2\xac',
'nsub': '\xe2\x8a\x84',
'Ntilde': '\xc3\x91',
'ntilde': '\xc3\xb1',
'Nu': '\xce\x9d',
'nu': '\xce\xbd',
'Oacute': '\xc3\x93',
'oacute': '\xc3\xb3',
'Ocirc': '\xc3\x94',
'ocirc': '\xc3\xb4',
'OElig': '\xc5\x92',
'oelig': '\xc5\x93',
'Ograve': '\xc3\x92',
'ograve': '\xc3\xb2',
'oline': '\xef\xa3\xa5',
'omega': '\xcf\x89',
'Omega': '\xe2\x84\xa6',
'Omicron': '\xce\x9f',
'omicron': '\xce\xbf',
'oplus': '\xe2\x8a\x95',
'ordf': '\xc2\xaa',
'ordm': '\xc2\xba',
'or': '\xe2\x88\xa8',
'Oslash': '\xc3\x98',
'oslash': '\xc3\xb8',
'Otilde': '\xc3\x95',
'otilde': '\xc3\xb5',
'otimes': '\xe2\x8a\x97',
'Ouml': '\xc3\x96',
'ouml': '\xc3\xb6',
'para': '\xc2\xb6',
'part': '\xe2\x88\x82',
'permil': '\xe2\x80\xb0',
'perp': '\xe2\x8a\xa5',
'phis': '\xcf\x86',
'Phi': '\xce\xa6',
'phi': '\xcf\x95',
'piv': '\xcf\x96',
'Pi': '\xce\xa0',
'pi': '\xcf\x80',
'plusmn': '\xc2\xb1',
'pound': '\xc2\xa3',
'prime': '\xe2\x80\xb2',
'Prime': '\xe2\x80\xb3',
'prod': '\xe2\x88\x8f',
'prop': '\xe2\x88\x9d',
'Psi': '\xce\xa8',
'psi': '\xcf\x88',
'radic': '\xe2\x88\x9a',
'rang': '\xe2\x8c\xaa',
'raquo': '\xc2\xbb',
'rarr': '\xe2\x86\x92',
'rArr': '\xe2\x87\x92',
'rceil': '\xef\xa3\xb9',
'rdquo': '\xe2\x80\x9d',
'real': '\xe2\x84\x9c',
'reg': '\xc2\xae',
'rfloor': '\xef\xa3\xbb',
'Rho': '\xce\xa1',
'rho': '\xcf\x81',
'rlm': '\xe2\x80\x8f',
'rsaquo': '\xe2\x80\xba',
'rsquo': '\xe2\x80\x99',
'sbquo': '\xe2\x80\x9a',
'Scaron': '\xc5\xa0',
'scaron': '\xc5\xa1',
'sdot': '\xe2\x8b\x85',
'sect': '\xc2\xa7',
'shy': '\xc2\xad',
'sigmaf': '\xcf\x82',
'sigmav': '\xcf\x82',
'Sigma': '\xce\xa3',
'sigma': '\xcf\x83',
'sim': '\xe2\x88\xbc',
'spades': '\xe2\x99\xa0',
'sube': '\xe2\x8a\x86',
'sub': '\xe2\x8a\x82',
'sum': '\xe2\x88\x91',
'sup1': '\xc2\xb9',
'sup2': '\xc2\xb2',
'sup3': '\xc2\xb3',
'supe': '\xe2\x8a\x87',
'sup': '\xe2\x8a\x83',
'szlig': '\xc3\x9f',
'Tau': '\xce\xa4',
'tau': '\xcf\x84',
'there4': '\xe2\x88\xb4',
'thetasym': '\xcf\x91',
'thetav': '\xcf\x91',
'Theta': '\xce\x98',
'theta': '\xce\xb8',
'thinsp': '\xe2\x80\x89',
'THORN': '\xc3\x9e',
'thorn': '\xc3\xbe',
'tilde': '\xcb\x9c',
'times': '\xc3\x97',
'trade': '\xef\xa3\xaa',
'Uacute': '\xc3\x9a',
'uacute': '\xc3\xba',
'uarr': '\xe2\x86\x91',
'uArr': '\xe2\x87\x91',
'Ucirc': '\xc3\x9b',
'ucirc': '\xc3\xbb',
'Ugrave': '\xc3\x99',
'ugrave': '\xc3\xb9',
'uml': '\xc2\xa8',
'upsih': '\xcf\x92',
'Upsilon': '\xce\xa5',
'upsilon': '\xcf\x85',
'Uuml': '\xc3\x9c',
'uuml': '\xc3\xbc',
'weierp': '\xe2\x84\x98',
'Xi': '\xce\x9e',
'xi': '\xce\xbe',
'Yacute': '\xc3\x9d',
'yacute': '\xc3\xbd',
'yen': '\xc2\xa5',
'yuml': '\xc3\xbf',
'Yuml': '\xc5\xb8',
'Zeta': '\xce\x96',
'zeta': '\xce\xb6',
'zwj': '\xe2\x80\x8d',
'zwnj': '\xe2\x80\x8c',
}
#------------------------------------------------------------------------
class ParaFrag(ABag):
"""class ParaFrag contains the intermediate representation of string
segments as they are being parsed by the XMLParser.
fontName, fontSize, rise, textColor, cbDefn
"""
_greek2Utf8=None
def _greekConvert(data):
global _greek2Utf8
if not _greek2Utf8:
from reportlab.pdfbase.rl_codecs import RL_Codecs
import codecs
dm = decoding_map = codecs.make_identity_dict(xrange(32,256))
for k in xrange(0,32):
dm[k] = None
dm.update(RL_Codecs._RL_Codecs__rl_codecs_data['symbol'][0])
_greek2Utf8 = {}
for k,v in dm.iteritems():
if not v:
u = '\0'
else:
u = unichr(v).encode('utf8')
_greek2Utf8[chr(k)] = u
return ''.join(map(_greek2Utf8.__getitem__,data))
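# Example (illustrative): _greekConvert('abg') maps each byte through the
# Symbol-font encoding, yielding UTF-8 for alpha, beta, gamma.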
#------------------------------------------------------------------
# !!! NOTE !!! THIS TEXT IS NOW REPLICATED IN PARAGRAPH.PY !!!
# The ParaFormatter will be able to format the following
# tags:
# < b > < /b > - bold
# < i > < /i > - italics
# < u > < /u > - underline
# < strike > < /strike > - strike through
# < super > < /super > - superscript
# < sup > < /sup > - superscript
# < sub > < /sub > - subscript
# <font name=fontfamily/fontname color=colorname size=float>
# < bullet > </bullet> - bullet text (at head of para only)
# <onDraw name=callable label="a label"/>
# <index [name="callablecanvasattribute"] label="a label"/>
# <link>link text</link>
# attributes of links
# size/fontSize=num
# name/face/fontName=name
# fg/textColor/color=color
# backcolor/backColor/bgcolor=color
# dest/destination/target/href/link=target
# <a>anchor text</a>
# attributes of anchors
# fontSize=num
# fontName=name
# fg/textColor/color=color
# backcolor/backColor/bgcolor=color
# href=href
# <a name="anchorpoint"/>
# <unichar name="unicode character name"/>
# <unichar value="unicode code point"/>
# <img src="path" width="1in" height="1in" valign="bottom"/>
# width="w%" --> fontSize*w/100 idea from Roberto Alsina
# height="h%" --> linewidth*h/100 <ralsina@netmanagers.com.ar>
# <greek> - </greek>
#
# The whole may be surrounded by <para> </para> tags
#
# It will also be able to handle any MathML specified Greek characters.
#------------------------------------------------------------------
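# Illustrative example (not from the original source) of markup of the kind
# described above that this parser accepts:
#
#   <para align=center spaceb=12>
#   Plain, <b>bold</b>, <i>italic</i>, <u>underlined</u>,
#   <font color=red size=14>red 14pt</font>, x<super>2</super>,
#   H<sub>2</sub>O and a <link href="http://example.com">link</link>.
#   </para>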
class ParaParser(xmllib.XMLParser):
#----------------------------------------------------------
# First we will define all of the xml tag handler functions.
#
# start_<tag>(attributes)
# end_<tag>()
#
# While parsing the xml ParaFormatter will call these
# functions to handle the string formatting tags.
# At the start of each tag the corresponding field will
# be set to 1 and at the end tag the corresponding field will
# be set to 0. Then when handle_data is called the options
# for that data will be apparent from the current settings.
#----------------------------------------------------------
def __getattr__( self, attrName ):
"""This way we can handle <TAG> the same way as <tag> (ignoring case)."""
if attrName!=attrName.lower() and attrName!="caseSensitive" and not self.caseSensitive and \
(attrName.startswith("start_") or attrName.startswith("end_")):
return getattr(self,attrName.lower())
raise AttributeError, attrName
#### bold
def start_b( self, attributes ):
self._push(bold=1)
def end_b( self ):
self._pop(bold=1)
def start_strong( self, attributes ):
self._push(bold=1)
def end_strong( self ):
self._pop(bold=1)
#### italics
def start_i( self, attributes ):
self._push(italic=1)
def end_i( self ):
self._pop(italic=1)
def start_em( self, attributes ):
self._push(italic=1)
def end_em( self ):
self._pop(italic=1)
#### underline
def start_u( self, attributes ):
self._push(underline=1)
def end_u( self ):
self._pop(underline=1)
#### strike
def start_strike( self, attributes ):
self._push(strike=1)
def end_strike( self ):
self._pop(strike=1)
#### link
def start_link(self, attributes):
self._push(**self.getAttributes(attributes,_linkAttrMap))
def end_link(self):
frag = self._stack[-1]
del self._stack[-1]
assert frag.link!=None
#### anchor
def start_a(self, attributes):
A = self.getAttributes(attributes,_anchorAttrMap)
name = A.get('name',None)
if name is not None:
name = name.strip()
if not name:
self._syntax_error('<a name="..."/> anchor variant requires non-blank name')
if len(A)>1:
self._syntax_error('<a name="..."/> anchor variant only allows name attribute')
A = dict(name=A['name'])
A['_selfClosingTag'] = 'anchor'
else:
href = A.get('href','').strip()
if not href:
self._syntax_error('<a> tag must have non-blank name or href attribute')
A['link'] = href #convert to our link form
A.pop('href')
self._push(**A)
def end_a(self):
frag = self._stack[-1]
sct = getattr(frag,'_selfClosingTag','')
if sct:
assert sct=='anchor' and frag.name,'Parser failure in <a/>'
defn = frag.cbDefn = ABag()
defn.label = defn.kind = 'anchor'
defn.name = frag.name
del frag.name, frag._selfClosingTag
self.handle_data('')
self._pop()
else:
del self._stack[-1]
assert frag.link!=None
def start_img(self,attributes):
A = self.getAttributes(attributes,_imgAttrMap)
if not A.get('src'):
self._syntax_error('<img> needs src attribute')
A['_selfClosingTag'] = 'img'
self._push(**A)
def end_img(self):
frag = self._stack[-1]
assert getattr(frag,'_selfClosingTag',''),'Parser failure in <img/>'
defn = frag.cbDefn = ABag()
defn.kind = 'img'
defn.src = getattr(frag,'src',None)
defn.image = ImageReader(defn.src)
size = defn.image.getSize()
defn.width = getattr(frag,'width',size[0])
defn.height = getattr(frag,'height',size[1])
defn.valign = getattr(frag,'valign','bottom')
del frag._selfClosingTag
self.handle_data('')
self._pop()
#### super script
def start_super( self, attributes ):
self._push(super=1)
def end_super( self ):
self._pop(super=1)
start_sup = start_super
end_sup = end_super
#### sub script
def start_sub( self, attributes ):
self._push(sub=1)
def end_sub( self ):
self._pop(sub=1)
#### greek script
#### add symbol encoding
def handle_charref(self, name):
try:
if name[0]=='x':
n = int(name[1:],16)
else:
n = int(name)
except ValueError:
self.unknown_charref(name)
return
self.handle_data(unichr(n).encode('utf8'))
def handle_entityref(self,name):
if name in greeks:
self.handle_data(greeks[name])
else:
xmllib.XMLParser.handle_entityref(self,name)
def syntax_error(self,lineno,message):
self._syntax_error(message)
def _syntax_error(self,message):
if message[:10]=="attribute " and message[-17:]==" value not quoted": return
self.errors.append(message)
def start_greek(self, attr):
self._push(greek=1)
def end_greek(self):
self._pop(greek=1)
def start_unichar(self, attr):
if 'name' in attr:
if 'code' in attr:
self._syntax_error('<unichar/> invalid with both name and code attributes')
try:
v = unicodedata.lookup(attr['name']).encode('utf8')
except KeyError:
self._syntax_error('<unichar/> invalid name attribute\n"%s"' % attr['name'])
v = '\0'
elif 'code' in attr:
try:
v = unichr(int(eval(attr['code']))).encode('utf8')
except:
self._syntax_error('<unichar/> invalid code attribute %s' % attr['code'])
v = '\0'
else:
v = None
if attr:
self._syntax_error('<unichar/> invalid attribute %s' % attr.keys()[0])
if v is not None:
self.handle_data(v)
self._push(_selfClosingTag='unichar')
def end_unichar(self):
self._pop()
def start_font(self,attr):
self._push(**self.getAttributes(attr,_fontAttrMap))
def end_font(self):
self._pop()
def start_br(self, attr):
#just do the trick to make sure there is no content
self._push(_selfClosingTag='br',lineBreak=True,text='')
def end_br(self):
frag = self._stack[-1]
assert frag._selfClosingTag=='br' and frag.lineBreak,'Parser failure in <br/>'
del frag._selfClosingTag
self.handle_data('')
self._pop()
def _initial_frag(self,attr,attrMap,bullet=0):
style = self._style
if attr!={}:
style = copy.deepcopy(style)
_applyAttributes(style,self.getAttributes(attr,attrMap))
self._style = style
# initialize semantic values
frag = ParaFrag()
frag.sub = 0
frag.super = 0
frag.rise = 0
frag.underline = 0
frag.strike = 0
frag.greek = 0
frag.link = None
if bullet:
frag.fontName, frag.bold, frag.italic = ps2tt(style.bulletFontName)
frag.fontSize = style.bulletFontSize
frag.textColor = hasattr(style,'bulletColor') and style.bulletColor or style.textColor
else:
frag.fontName, frag.bold, frag.italic = ps2tt(style.fontName)
frag.fontSize = style.fontSize
frag.textColor = style.textColor
return frag
def start_para(self,attr):
self._stack = [self._initial_frag(attr,_paraAttrMap)]
def end_para(self):
self._pop()
def start_bullet(self,attr):
if hasattr(self,'bFragList'):
self._syntax_error('only one <bullet> tag allowed')
self.bFragList = []
frag = self._initial_frag(attr,_bulletAttrMap,1)
frag.isBullet = 1
self._stack.append(frag)
def end_bullet(self):
self._pop()
#---------------------------------------------------------------
def start_seqdefault(self, attr):
try:
default = attr['id']
except KeyError:
default = None
self._seq.setDefaultCounter(default)
def end_seqdefault(self):
pass
def start_seqreset(self, attr):
try:
id = attr['id']
except KeyError:
id = None
try:
base = int(attr['base'])
except:
base=0
self._seq.reset(id, base)
def end_seqreset(self):
pass
def start_seqchain(self, attr):
try:
order = attr['order']
except KeyError:
order = ''
order = order.split()
seq = self._seq
for p,c in zip(order[:-1],order[1:]):
seq.chain(p, c)
end_seqchain = end_seqreset
def start_seqformat(self, attr):
try:
id = attr['id']
except KeyError:
id = None
try:
value = attr['value']
except KeyError:
value = '1'
self._seq.setFormat(id,value)
end_seqformat = end_seqreset
# AR hacking in aliases to allow the proper casing for RML.
# the above ones should be deprecated over time. 2001-03-22
start_seqDefault = start_seqdefault
end_seqDefault = end_seqdefault
start_seqReset = start_seqreset
end_seqReset = end_seqreset
start_seqChain = start_seqchain
end_seqChain = end_seqchain
start_seqFormat = start_seqformat
end_seqFormat = end_seqformat
def start_seq(self, attr):
#if it has a template, use that; otherwise try for id;
#otherwise take default sequence
if 'template' in attr:
templ = attr['template']
self.handle_data(templ % self._seq)
return
elif 'id' in attr:
id = attr['id']
else:
id = None
increment = attr.get('inc', None)
if not increment:
output = self._seq.nextf(id)
else:
#accepts "no" for do not increment, or an integer.
#thus, 0 and 1 increment by the right amounts.
if increment.lower() == 'no':
output = self._seq.thisf(id)
else:
incr = int(increment)
output = self._seq.thisf(id)
self._seq.reset(id, self._seq._this() + incr)
self.handle_data(output)
def end_seq(self):
pass
def start_onDraw(self,attr):
defn = ABag()
if 'name' in attr: defn.name = attr['name']
else: self._syntax_error('<onDraw> needs at least a name attribute')
if 'label' in attr: defn.label = attr['label']
defn.kind='onDraw'
self._push(cbDefn=defn)
self.handle_data('')
self._pop()
end_onDraw=end_seq
def start_index(self,attr):
attr=self.getAttributes(attr,_indexAttrMap)
defn = ABag()
if 'item' in attr:
label = attr['item']
else:
self._syntax_error('<index> needs at least an item attribute')
if 'name' in attr:
name = attr['name']
else:
name = DEFAULT_INDEX_NAME
format = attr.get('format',None)
if format is not None and format not in ('123','I','i','ABC','abc'):
raise ValueError('index tag format is %r not valid 123 I i ABC or abc' % format)
offset = attr.get('offset',None)
if offset is not None:
try:
offset = int(offset)
except:
raise ValueError('index tag offset is %r not an int' % offset)
defn.label = base64.encodestring(pickle.dumps((label,format,offset))).strip()
defn.name = name
defn.kind='index'
self._push(cbDefn=defn)
self.handle_data('')
self._pop()
end_index=end_seq
#---------------------------------------------------------------
def _push(self,**attr):
frag = copy.copy(self._stack[-1])
_applyAttributes(frag,attr)
self._stack.append(frag)
def _pop(self,**kw):
frag = self._stack[-1]
del self._stack[-1]
for k, v in kw.items():
assert getattr(frag,k)==v
return frag
def getAttributes(self,attr,attrMap):
A = {}
for k, v in attr.items():
if not self.caseSensitive:
k = string.lower(k)
if k in attrMap.keys():
j = attrMap[k]
func = j[1]
try:
A[j[0]] = (func is None) and v or func(v)
except:
self._syntax_error('%s: invalid value %s'%(k,v))
else:
self._syntax_error('invalid attribute name %s'%k)
return A
#----------------------------------------------------------------
def __init__(self,verbose=0):
self.caseSensitive = 0
xmllib.XMLParser.__init__(self,verbose=verbose)
def _iReset(self):
self.fragList = []
if hasattr(self, 'bFragList'): delattr(self,'bFragList')
def _reset(self, style):
'''reset the parser'''
xmllib.XMLParser.reset(self)
# initialize list of string segments to empty
self.errors = []
self._style = style
self._iReset()
#----------------------------------------------------------------
def handle_data(self,data):
"Creates an intermediate representation of string segments."
frag = copy.copy(self._stack[-1])
if hasattr(frag,'cbDefn'):
kind = frag.cbDefn.kind
if data: self._syntax_error('Only empty <%s> tag allowed' % kind)
elif hasattr(frag,'_selfClosingTag'):
if data!='': self._syntax_error('No content allowed in %s tag' % frag._selfClosingTag)
return
else:
# if sub and super are both on they will cancel each other out
if frag.sub == 1 and frag.super == 1:
frag.sub = 0
frag.super = 0
if frag.sub:
frag.rise = -frag.fontSize*subFraction
frag.fontSize = max(frag.fontSize-sizeDelta,3)
elif frag.super:
frag.rise = frag.fontSize*superFraction
frag.fontSize = max(frag.fontSize-sizeDelta,3)
if frag.greek:
frag.fontName = 'symbol'
data = _greekConvert(data)
# bold, italic, and underline
frag.fontName = tt2ps(frag.fontName,frag.bold,frag.italic)
#save our data
frag.text = data
if hasattr(frag,'isBullet'):
delattr(frag,'isBullet')
self.bFragList.append(frag)
else:
self.fragList.append(frag)
def handle_cdata(self,data):
self.handle_data(data)
def _setup_for_parse(self,style):
self._seq = reportlab.lib.sequencer.getSequencer()
self._reset(style) # reinitialise the parser
def parse(self, text, style):
"""Given a formatted string will return a list of
ParaFrag objects with their calculated widths.
If errors occur None will be returned and the
self.errors holds a list of the error messages.
"""
# AR 20040612 - when we feed Unicode strings in, sgmlop
# tries to coerce to ASCII. Must intercept, coerce to
# any 8-bit encoding which defines most of 256 points,
# and revert at end. Yuk. Preliminary step prior to
# removal of parser altogether.
enc = self._enc = 'utf8' #our legacy default
self._UNI = type(text) is UnicodeType
if self._UNI:
text = text.encode(enc)
self._setup_for_parse(style)
# the xmlparser requires that all text be surrounded by xml
# tags, therefore we must throw some unused flags around the
# given string
if not(len(text)>=6 and text[0]=='<' and _re_para.match(text)):
text = "<para>"+text+"</para>"
self.feed(text)
self.close() # force parsing to complete
return self._complete_parse()
def _complete_parse(self):
del self._seq
style = self._style
del self._style
if len(self.errors)==0:
fragList = self.fragList
bFragList = hasattr(self,'bFragList') and self.bFragList or None
self._iReset()
else:
fragList = bFragList = None
if self._UNI:
#reconvert to unicode
if fragList:
for frag in fragList:
frag.text = unicode(frag.text, self._enc)
if bFragList:
for frag in bFragList:
frag.text = unicode(frag.text, self._enc)
return style, fragList, bFragList
def _tt_parse(self,tt):
tag = tt[0]
try:
start = getattr(self,'start_'+tag)
end = getattr(self,'end_'+tag)
except AttributeError:
raise ValueError('Invalid tag "%s"' % tag)
start(tt[1] or {})
C = tt[2]
if C:
M = self._tt_handlers
for c in C:
M[type(c) is TupleType](c)
end()
def tt_parse(self,tt,style):
'''parse from tupletree form'''
self._setup_for_parse(style)
self._tt_handlers = self.handle_data,self._tt_parse
self._tt_parse(tt)
return self._complete_parse()
if __name__=='__main__':
from reportlab.platypus import cleanBlockQuotedText
from reportlab.lib.styles import _baseFontName
_parser=ParaParser()
def check_text(text,p=_parser):
print '##########'
text = cleanBlockQuotedText(text)
l,rv,bv = p.parse(text,style)
if rv is None:
for l in _parser.errors:
print l
else:
print 'ParaStyle', l.fontName,l.fontSize,l.textColor
for l in rv:
print l.fontName,l.fontSize,l.textColor,l.bold, l.rise, '|%s|'%l.text[:25],
if hasattr(l,'cbDefn'):
print 'cbDefn',getattr(l.cbDefn,'name',''),getattr(l.cbDefn,'label',''),l.cbDefn.kind
else: print
style=ParaFrag()
style.fontName=_baseFontName
style.fontSize = 12
style.textColor = black
style.bulletColor = black
style.bulletFontName=_baseFontName
style.bulletFontSize=12
text='''
<b><i><greek>a</greek>D</i></b>β<unichar value="0x394"/>
<font name="helvetica" size="15" color=green>
Tell me, O muse, of that ingenious hero who travelled far and wide
after</font> he had sacked the famous town of Troy. Many cities did he visit,
and many were the nations with whose manners and customs he was acquainted;
moreover he suffered much by sea while trying to save his own life
and bring his men safely home; but do what he might he could not save
his men, for they perished through their own sheer folly in eating
the cattle of the Sun-god Hyperion; so the god prevented them from
ever reaching home. Tell me, too, about all these things, O daughter
of Jove, from whatsoever source you<super>1</super> may know them.
'''
check_text(text)
check_text('<para> </para>')
check_text('<para font="%s" size=24 leading=28.8 spaceAfter=72>ReportLab -- Reporting for the Internet Age</para>'%_baseFontName)
check_text('''
<font color=red>τ</font>Tell me, O muse, of that ingenious hero who travelled far and wide
after he had sacked the famous town of Troy. Many cities did he visit,
and many were the nations with whose manners and customs he was acquainted;
moreover he suffered much by sea while trying to save his own life
and bring his men safely home; but do what he might he could not save
his men, for they perished through their own sheer folly in eating
the cattle of the Sun-god Hyperion; so the god prevented them from
ever reaching home. Tell me, too, about all these things, O daughter
of Jove, from whatsoever source you may know them.''')
check_text('''
Telemachus took this speech as of good omen and rose at once, for
he was bursting with what he had to say. He stood in the middle of
the assembly and the good herald Pisenor brought him his staff. Then,
turning to Aegyptius, "Sir," said he, "it is I, as you will shortly
learn, who have convened you, for it is I who am the most aggrieved.
I have not got wind of any host approaching about which I would warn
you, nor is there any matter of public moment on which I would speak.
My grievance is purely personal, and turns on two great misfortunes
which have fallen upon my house. The first of these is the loss of
my excellent father, who was chief among all you here present, and
was like a father to every one of you; the second is much more serious,
and ere long will be the utter ruin of my estate. The sons of all
the chief men among you are pestering my mother to marry them against
her will. They are afraid to go to her father Icarius, asking him
to choose the one he likes best, and to provide marriage gifts for
his daughter, but day by day they keep hanging about my father's house,
sacrificing our oxen, sheep, and fat goats for their banquets, and
never giving so much as a thought to the quantity of wine they drink.
No estate can stand such recklessness; we have now no Ulysses to ward
off harm from our doors, and I cannot hold my own against them. I
shall never all my days be as good a man as he was, still I would
indeed defend myself if I had power to do so, for I cannot stand such
treatment any longer; my house is being disgraced and ruined. Have
respect, therefore, to your own consciences and to public opinion.
Fear, too, the wrath of heaven, lest the gods should be displeased
and turn upon you. I pray you by Jove and Themis, who is the beginning
and the end of councils, [do not] hold back, my friends, and leave
me singlehanded- unless it be that my brave father Ulysses did some
wrong to the Achaeans which you would now avenge on me, by aiding
and abetting these suitors. Moreover, if I am to be eaten out of house
and home at all, I had rather you did the eating yourselves, for I
could then take action against you to some purpose, and serve you
with notices from house to house till I got paid in full, whereas
now I have no remedy."''')
check_text('''
But as the sun was rising from the fair sea into the firmament of
heaven to shed light on mortals and immortals, they reached Pylos
the city of Neleus. Now the people of Pylos were gathered on the sea
shore to offer sacrifice of black bulls to Neptune lord of the Earthquake.
There were nine guilds with five hundred men in each, and there were
nine bulls to each guild. As they were eating the inward meats and
burning the thigh bones [on the embers] in the name of Neptune, Telemachus
and his crew arrived, furled their sails, brought their ship to anchor,
and went ashore. ''')
check_text('''
So the neighbours and kinsmen of Menelaus were feasting and making
merry in his house. There was a bard also to sing to them and play
his lyre, while two tumblers went about performing in the midst of
them when the man struck up with his tune.]''')
check_text('''
"When we had passed the [Wandering] rocks, with Scylla and terrible
Charybdis, we reached the noble island of the sun-god, where were
the goodly cattle and sheep belonging to the sun Hyperion. While still
at sea in my ship I could hear the cattle lowing as they came home
to the yards, and the sheep bleating. Then I remembered what the blind
Theban prophet Teiresias had told me, and how carefully Aeaean Circe
had warned me to shun the island of the blessed sun-god. So being
much troubled I said to the men, 'My men, I know you are hard pressed,
but listen while I <strike>tell you the prophecy that</strike> Teiresias made me, and
how carefully Aeaean Circe warned me to shun the island of the blessed
sun-god, for it was here, she said, that our worst danger would lie.
Head the ship, therefore, away from the island.''')
check_text('''A<B>C&D"E'F''')
check_text('''A< B> C& D" E' F''')
check_text('''<![CDATA[<>&'"]]>''')
check_text('''<bullet face=courier size=14 color=green>+</bullet>
There was a bard also to sing to them and play
his lyre, while two tumblers went about performing in the midst of
them when the man struck up with his tune.]''')
check_text('''<onDraw name="myFunc" label="aaa bbb">A paragraph''')
check_text('''<para><onDraw name="myFunc" label="aaa bbb">B paragraph</para>''')
# HVB, 30.05.2003: Test for new features
_parser.caseSensitive=0
check_text('''Here comes <FONT FACE="Helvetica" SIZE="14pt">Helvetica 14</FONT> with <STRONG>strong</STRONG> <EM>emphasis</EM>.''')
check_text('''Here comes <font face="Helvetica" size="14pt">Helvetica 14</font> with <Strong>strong</Strong> <em>emphasis</em>.''')
check_text('''Here comes <font face="Courier" size="3cm">Courier 3cm</font> and normal again.''')
check_text('''Before the break <br/>the middle line <br/> and the last line.''')
check_text('''This should be an inline image <img src='../docs/images/testimg.gif'/>!''')
check_text('''aaa bbbb <u>underline </u> cccc''')
| |
#!/usr/bin/env python
from collections import namedtuple
import sequence
# ================================================================
# Data Structures
# ================================================================
_fields = [
'fromSeq',
'startIdx',
'endIdx',
'label',
'data'
]
PatternInstance = namedtuple('PatternInstance', _fields)
def createPatternInstance(startIdx, endIdx, label=0, fromSeq=0, data=None):
return PatternInstance(fromSeq, startIdx, endIdx, label, data)
# ================================================================
# Functions
# ================================================================
def subseqsMatch(reported, truth, ignorePositions=False, ignoreLabel=False,
minOverlapFraction=.5, requireContainment=False):
seqId, ts, te, c, _ = reported
seqId2, ts2, te2, c2, _ = truth
gap2 = te2 - ts2
if seqId != seqId2: # false if seqs different
return False
if (not ignoreLabel) and (c != c2): # false if classes different
return False
if ignorePositions: # if ignoring positions, seqId and class were everything
if requireContainment:
raise ValueError("cannot simultaneously ignore positions and"
"require reported seqs to be contained in true seqs!")
return True
# sanity check start and end times
if te < ts:
print("Reported start index %d > end index %d", ts, te)
assert(False)
return
if te2 < ts2:
print("Ground truth start index %d > end index %d", ts2, te2)
assert(False)
return
if requireContainment: # reported seq must be completely contained by true seq
return ts >= ts2 and te <= te2
# we just got a single time step as a ground truth annotation,
# so return true if the reported seq includes this time step
if gap2 == 0:
return ts <= ts2 <= te
# false if no overlap
if te < ts2 or te2 < ts:
return False
overlapFraction = instancesIOU(reported, truth)
return overlapFraction >= minOverlapFraction
# return true iff both start and end are within (1 - minOverlapFraction)
# of true times
# maxDiff = gap2 * (1. - minOverlapFraction)
# return abs(ts - ts2) < maxDiff and abs(te - te2) < maxDiff
# print "subseqsMatch(): comparing boundaries of ", reported, truth
# isMatch = abs(ts - ts2) < maxDiff and abs(te - te2) < maxDiff
# print "match? ", isMatch
# return isMatch
def intersectionSize(ts, te, ts2, te2):
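"""Size of the overlap between the half-open ranges [ts, te) and [ts2, te2).
Doctests below are illustrative and run via doctest.testmod() in __main__:
>>> intersectionSize(0, 10, 5, 20)
5
>>> intersectionSize(0, 4, 6, 9)
0
"""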
assert(te >= ts)
assert(te2 >= ts2)
if ts == te or ts2 == te2:
return 0
if te < ts2 or te2 < ts:  # disjoint ranges ('and' here could never be true)
return 0
# ensure that ts <= ts2
if ts > ts2:
ts, te, ts2, te2 = ts2, te2, ts, te
if te <= te2:
# ------
# ------
return te - ts2
else:
# --------
# -----
return te2 - ts2
def instancesIntersectionSize(inst1, inst2):
return intersectionSize(inst1.startIdx, inst1.endIdx,
inst2.startIdx, inst2.endIdx)
def summedInstanceSizes(inst1, inst2):
return (inst1.endIdx - inst1.startIdx) + (inst2.endIdx - inst2.startIdx)
def instancesIOU(inst1, inst2):
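"""Intersection-over-union of two PatternInstances, by index range.
Illustrative doctest (values chosen by hand):
>>> instancesIOU(createPatternInstance(0, 10), createPatternInstance(5, 20))
0.25
"""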
interSize = float(instancesIntersectionSize(inst1, inst2))
summedSize = summedInstanceSizes(inst1, inst2)
unionSize = summedSize - interSize
return interSize / unionSize
def matchIgnoringPositions(reported, truth, **kwargs):
return subseqsMatch(reported, truth, ignorePositions=True, **kwargs)
def matchingSubseqs(reportedSeqs, trueSeqs, matchFunc=None, **matchFuncKwargs):
"""
Given the (seqId, start, end, class) tuples reported by a classifier and the
true (seqId, start, end, class) tuples, compute the index of which of the
true tuples each reported tuple corresponds to (-1 if none of them). Returns
these values as a list l such that l[i] = j, where j is the index of the
trueSeq which the ith reportedSeq matches.
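Illustrative doctest (labels and indices chosen by hand):
>>> rep = [createPatternInstance(0, 10, label=1)]
>>> tru = [createPatternInstance(2, 10, label=1)]
>>> matchingSubseqs(rep, tru)
[0]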
"""
# make sure we have a func to test for matches
matchFunc = matchFunc or subseqsMatch
matches = []
matchesSet = set()
for i, repSeq in enumerate(reportedSeqs):
matchIdx = -1
for j, trueSeq in enumerate(trueSeqs):
if j in matchesSet:
continue
if matchFunc(repSeq, trueSeq, **matchFuncKwargs):
matchIdx = j
matchesSet.add(j)
break
matches.append(matchIdx)
return matches
def subseqIntersectionSizes(reportedSeqs, trueSeqs, matchFunc=None, ignoreLabel=False):
"""return the size of the intersection (in time steps) of each reportedSeq
with its best-matching trueSeq, ignoring duplicates"""
# make sure we have a func to test for matches
matchFunc = matchFunc or subseqsMatch
# intersectionSizes = np.zeros(len(reportedSeqs))
intersectionSizes = []
matchesSet = set()
for i, repSeq in enumerate(reportedSeqs):
matchIdx = -1
bestSize = 0
for j, truSeq in enumerate(trueSeqs):
if j in matchesSet:
continue
# check if these are from the same seq, same class, etc, but
# with no requirement for how much they must overlap
if matchFunc(repSeq, truSeq, minOverlapFraction=0.0001,
ignoreLabel=ignoreLabel):
sz = instancesIntersectionSize(repSeq, truSeq)
if sz > bestSize:
bestSize = sz
matchIdx = j
intersectionSizes.append(bestSize)
matchesSet.add(matchIdx)
return intersectionSizes
def computeNumMatches(reportedSeqs, trueSeqs, *args, **kwargs):
matches = matchingSubseqs(reportedSeqs, trueSeqs, *args, **kwargs)
numMatches = len(filter(lambda matchIdx: matchIdx >= 0, matches))
return numMatches
def totalInstancesSize(insts):
return sum([inst.endIdx - inst.startIdx for inst in insts])
def computeIOU(reportedSeqs, trueSeqs, ignoreLabel=False):
intersectionSizes = subseqIntersectionSizes(reportedSeqs, trueSeqs,
ignoreLabel=ignoreLabel)
intersectionSize = sum(intersectionSizes)
summedSize = totalInstancesSize(reportedSeqs) + totalInstancesSize(trueSeqs)
unionSize = summedSize - intersectionSize
return intersectionSize, unionSize, float(intersectionSize) / unionSize
def old_matchingSubseqs(reportedSeqs, trueSeqs, matchFunc=None):
"""
Given the (seqId, start, end, class) tuples reported by a classifier and the
true (seqId, start, end, class) tuples, compute which of the true tuples
each reported tuple corresponds to (-1 if none of them).
seqId is a unique ID for each input sequence, start and end are indices
within this sequence, and matchFunc is the function used to determine
whether a reported and ground truth tuple match. Tuples are split by
seqId, so matchFunc need only assess start and end indices and class. By
default, matchFunc defaults to subseqsMatch (also in this file).
Matches are assigned greedily from beginning to end, sorted by start index.
Returns a dict: seqId -> idxs of matching truth tuple (or -1) for each
reported tuple
"""
# make sure we have a func to test for matches
matchFunc = matchFunc or subseqsMatch
# group reported and true seqs by sequence id (in position 0)
seq2reported = sequence.splitElementsBy(lambda tup: tup[0], reportedSeqs)
seq2truth = sequence.splitElementsBy(lambda tup: tup[0], trueSeqs)
matchesDict = {}
for seqId, reported in seq2reported.iteritems():
truth = seq2truth.get(seqId)
if not truth: # ground truth has no instances in this sequence
continue
matches = []
# sort by start time
reported = sorted(reported, key=lambda x: x[1])
truth = sorted(truth, key=lambda x: x[1])
for i, repSeq in enumerate(reported):
matches.append(-1)
for j, trueSeq in enumerate(truth):
if matchFunc(repSeq, trueSeq):
matches[i] = j
del truth[j] # can't match the same thing twice
break
matchesDict[seqId] = matches
return matchesDict
def old_numMatchingSeqs(matchesDict):
numMatches = 0
for k, idxs in matchesDict.iteritems():
validIdxs = filter(lambda idx: idx >= 0, idxs)
numMatches += len(validIdxs)
return numMatches
# def classInstanceCountsInSeqs(reportedSeqs, trueSeqs, **sink):
# # group reported and true seqs by sequence id (in position 0)
# seq2reported = sequence.splitElementsBy(lambda tup: tup[0], reportedSeqs)
# seq2truth = sequence.splitElementsBy(lambda tup: tup[0], trueSeqs)
# seqId2ClassInstanceCounts = {}
# for seqId, reported in seq2reported.iteritems():
# truth = seq2truth.get(seqId)
# # create dict: class label -> sequences
# class2elements_reported = sequence.splitElementsBy(lambda seq: seq.label, reported)
# class2elements_truth = sequence.splitElementsBy(lambda seq: seq.label, truth)
# # dict: class label -> # instances
# classCounts_reported = sequence.applyToDict(lambda k, v: len(v), class2elements_reported)
# classCounts_truth = sequence.applyToDict(lambda k, v: len(v), class2elements_truth)
# seqId2ClassInstanceCounts[seqId] = (classCounts_reported, classCounts_truth)
# return seqId2ClassInstanceCounts
# def classStatsForSeqs(reportedClassCountsDict, truthClassCountsDict):
# reportedClasses = reportedClassCountsDict.keys()
# truthClasses = reportedClassCountsDict.keys()
# allClasses = sequence.uniqueElements(reportedClasses + truthClasses)
def precisionAndRecall(numReportedSeqs, numTrueSeqs, numMatchingSeqs):
if (not numReportedSeqs) or (not numTrueSeqs):
return 0., 0.
prec = float(numMatchingSeqs) / numReportedSeqs
rec = float(numMatchingSeqs) / numTrueSeqs
return prec, rec
def f1Score(precision, recall):
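"""Harmonic mean of precision and recall.
>>> f1Score(0.5, 0.5)
0.5
>>> f1Score(1.0, 0.0)
0.0
"""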
if (not precision) or (not recall):
return 0.
return 2. * precision * recall / (precision + recall)
def precisionRecallF1(numReported, numTrue, numMatches):
prec, rec = precisionAndRecall(numReported, numTrue, numMatches)
return prec, rec, f1Score(prec, rec)
# TODO refactor so less dup code with subseqMatchStats (and so returnMoreStats
# is documented)
def subseqIOUStats(reportedSeqs, trueSeqs, matchUpLabels=False, returnMoreStats=False):
if matchUpLabels: # might find pattern, but not know what to label it
lbl2reported = sequence.splitElementsBy(lambda inst: inst.label, reportedSeqs)
lbl2truth = sequence.splitElementsBy(lambda inst: inst.label, trueSeqs)
intersectionSize = 0.
unionSize = 0.
reportedSize = 0.
truthSize = 0.
for repLbl, repSeqs in lbl2reported.iteritems():
bestIntersection = 0
bestUnion = 0
bestReportedSize = 0
bestTruthSize = 0
bestIOU = 0.
for truthLbl, truthSeqs in lbl2truth.iteritems():
interSz, unionSz, iou = computeIOU(repSeqs, truthSeqs,
ignoreLabel=True)
if iou >= bestIOU: # so that ties for 0 will replace stuff
bestIntersection = interSz
bestUnion = unionSz
bestReportedSize = totalInstancesSize(repSeqs)
bestTruthSize = totalInstancesSize(truthSeqs)
bestIOU = iou
intersectionSize += bestIntersection
unionSize += bestUnion
reportedSize += bestReportedSize
truthSize += bestTruthSize
iou = float(intersectionSize) / unionSize
else:
intersectionSize, unionSize, iou = computeIOU(reportedSeqs, trueSeqs,
ignoreLabel=False)
reportedSize = totalInstancesSize(reportedSeqs)
truthSize = totalInstancesSize(trueSeqs)
if returnMoreStats:
return intersectionSize, unionSize, iou, reportedSize, truthSize
return intersectionSize, unionSize, iou
def subseqMatchStats(reportedSeqs, trueSeqs, matchFunc=None,
spoofNumReported=-1, spoofNumTrue=-1, ignorePositions=False,
matchUpLabels=False, matchAllClasses=False, minOverlapFraction=.5,
requireContainment=False, **sink):
"""[PatternInstance] x [PatternInstance] -> numMatches, numReported, numTrue"""
if (not matchFunc) and ignorePositions:
matchFunc = matchIgnoringPositions
if matchUpLabels: # might find pattern, but not know what to label it
lbl2reported = sequence.splitElementsBy(lambda inst: inst.label, reportedSeqs)
lbl2truth = sequence.splitElementsBy(lambda inst: inst.label, trueSeqs)
# print "subseqMatchStats: matching up with {} true labels".format(len(lbl2truth))
# print "subseqMatchStats: matching up {} reported with {} actual".format(
# len(reportedSeqs), len(trueSeqs))
# XXX if we report more than one label, there isn't necessarily a
# bijective mapping between reported and ground truth labels; it's
# possible to get more matches than true labels here, among other issues
numMatches = 0 # or avg overlap fraction (IOU)
numTrue = 0
for repLbl, repSeqs in lbl2reported.iteritems():
bestNumMatches = -1 # can't be 0 or numTrue stays unset if no matches
bestNumTruth = 0
for truthLbl, truthSeqs in lbl2truth.iteritems():
if len(truthSeqs) < 2: # ignore patterns that only happen once
continue
numMatchesForLabel = computeNumMatches(repSeqs, truthSeqs, matchFunc,
ignoreLabel=True, minOverlapFraction=minOverlapFraction,
requireContainment=requireContainment)
if numMatchesForLabel > bestNumMatches:
bestNumMatches = numMatchesForLabel
bestNumTruth = len(truthSeqs)
numMatches += max(0, bestNumMatches)
minNumTruth = min([len(truthSeqs) for _, truthSeqs in lbl2truth.iteritems()])
minNumTruth = max(minNumTruth, 2)
numTrue += max(minNumTruth, bestNumTruth)
else:
numMatches = computeNumMatches(reportedSeqs, trueSeqs, matchFunc,
minOverlapFraction=minOverlapFraction,
requireContainment=requireContainment)
if matchAllClasses:
numTrue = len(trueSeqs)
numReported = spoofNumReported if spoofNumReported >= 0 else len(reportedSeqs)
numTrue = spoofNumTrue if spoofNumTrue >= 0 else numTrue
return numReported, numTrue, numMatches
def scoreSubseqs(reportedSeqs, trueSeqs, **kwargs):
"""Returns (precision, recall, f1 score) based on matching via matchFunc"""
    numReported, numTrue, numMatches = subseqMatchStats(reportedSeqs, trueSeqs, **kwargs)
prec, rec = precisionAndRecall(numReported, numTrue, numMatches)
return prec, rec, f1Score(prec, rec)
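# Hedged usage sketch for scoreSubseqs above; PatternInstance objects,
# matchIgnoringPositions, and computeNumMatches are defined elsewhere in
# this module, and `reported` / `truth` are hypothetical instance lists:
#   prec, rec, f1 = scoreSubseqs(reported, truth, ignorePositions=True)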
# ================================================================
# Main
# ================================================================
if __name__ == '__main__':
import doctest
doctest.testmod()
| |
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
"""
Compute the reduced matching lattice vectors for heterostructure
interfaces as described in the paper by Zur and McGill:
Journal of Applied Physics 55, 378 (1984); doi: 10.1063/1.333084
"""
from six.moves import range
import sys, copy
from math import sqrt
import numpy as np
from pymatgen.core.structure import Structure, Lattice
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.standard_transformations import \
RotationTransformation
import os
__author__ = "Kiran Mathew, Arunima Singh, V. S. Chaitanya Kolluru"
__copyright__ = "Copyright 2018, Henniggroup"
__maintainer__ = "V. S. Chaitanya Kolluru"
__email__ = "chaitanya.ismu@gmail.com"
__status__ = "Production"
__date__ = "April 13, 2021"
def get_trans_matrices(n):
"""
Returns a list of 2x2 transformation matrices for the
given supercell
    n: size of the supercell (area-wise only)
"""
factors = []
for i in range(1, n + 1):
if n % i == 0:
factors.append(i)
trans_matrices = []
for i in factors:
m = n // i
tm = [[[i, j], [0, m]] for j in range(m)]
trans_matrices.append(tm)
    all_tm = []
    for each_set in trans_matrices:
        all_tm.extend(each_set)
    return all_tm
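# Example: get_trans_matrices(4) enumerates the 7 upper-triangular integer
# matrices with determinant 4: [[1, j], [0, 4]] for j in 0..3,
# [[2, j], [0, 2]] for j in 0..1, and [[4, 0], [0, 1]], i.e. every
# distinct area-4 supercell shape.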
def get_uv(ab, t_mat):
"""
Return u and v, the supercell lattice vectors obtained through the
transformation matrix
"""
u = np.array(ab[0]) * t_mat[0][0] + np.array(ab[1]) * t_mat[0][1]
v = np.array(ab[1]) * t_mat[1][1]
return [u, v]
def get_reduced_uv(uv, tm):
"""
Returns reduced lattice vectors
"""
is_not_reduced = True
u = np.array(uv[0])
v = np.array(uv[1])
tm1 = np.array(tm)
u1 = u.copy()
v1 = v.copy()
while is_not_reduced:
if np.dot(u, v) < 0:
v = -v
tm1[1] = -tm1[1]
if np.linalg.norm(u) > np.linalg.norm(v):
u1 = v.copy()
v1 = u.copy()
tm1c = tm1.copy()
tm1[0], tm1[1] = tm1c[1], tm1c[0]
elif np.linalg.norm(v) > np.linalg.norm(u + v):
v1 = v + u
tm1[1] = tm1[1] + tm1[0]
elif np.linalg.norm(v) > np.linalg.norm(u - v):
v1 = v - u
tm1[1] = tm1[1] - tm1[0]
else:
is_not_reduced = False
u = u1.copy()
v = v1.copy()
return [u, v], tm1
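# Example: starting from u = [1, 0], v = [5, 1] and the identity transition
# matrix, the loop subtracts u from v five times and terminates with the
# reduced pair u = [1, 0], v = [0, 1] and tm1 = [[1, 0], [-5, 1]]
# (a Lagrange-Gauss style reduction: same cell area, shorter vectors).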
def reduced_supercell_vectors(ab, n):
"""
Returns all possible reduced in-plane lattice vectors and
transition matrices for the given starting unit cell lattice
vectors(ab) and the supercell size n
"""
uv_list = []
tm_list = []
for r_tm in get_trans_matrices(n):
uv = get_uv(ab, r_tm)
uv_r, tm0 = get_reduced_uv(uv, r_tm)
uv_list.append(uv_r)
tm_list.append(tm0)
return uv_list, tm_list
def get_r_list(area1, area2, max_area, tol=0.02):
"""
    Returns a list of r1 and r2 values that satisfy:
    r1/r2 = area2/area1 with the constraints:
    r1 <= Area_max/area1 and r2 <= Area_max/area2
    r1 and r2 correspond to the supercell sizes of the 2 interfaces
    that align them
"""
r_list = []
rmax1 = int(max_area / area1)
rmax2 = int(max_area / area2)
print('rmax1, rmax2: {0}, {1}\n'.format(rmax1, rmax2))
for r1 in range(1, rmax1 + 1):
for r2 in range(1, rmax2 + 1):
if abs(float(r1) * area1 - float(r2) * area2) / max_area <= tol:
r_list.append([r1, r2])
return r_list
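# Example: get_r_list(10.0, 15.0, 100.0, tol=0.02) gives rmax1 = 10 and
# rmax2 = 6, and returns [[3, 2], [6, 4], [9, 6]], the supercell-size pairs
# with |r1*area1 - r2*area2| / max_area <= tol.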
def get_mismatch(a, b):
"""
    Fractional mismatch between the lattice vectors a and b
    (multiply by 100 for a percentage)
"""
a = np.array(a)
b = np.array(b)
return np.linalg.norm(b) / np.linalg.norm(a) - 1
def get_angle(a, b):
"""
Angle between lattice vectors a and b in degrees
"""
a = np.array(a)
b = np.array(b)
return np.arccos(
np.dot(a, b) / np.linalg.norm(a) / np.linalg.norm(b)) * 180 / np.pi
def surface_area(cell):
"""
Calculates the surface area of the Cell
"""
m = cell.lattice.matrix
return np.linalg.norm(np.cross(m[0], m[1]))
def get_area(uv):
"""
Returns area of the parallelogram, given a and b
"""
a = uv[0]
b = uv[1]
return np.linalg.norm(np.cross(a, b))
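# Quick sanity checks for the three helpers above:
#   get_mismatch([1, 0, 0], [1.02, 0, 0])  -> 0.02 (2% strain)
#   get_angle([1, 0, 0], [0, 1, 0])        -> 90.0 (degrees)
#   get_area([[1, 0, 0], [0, 2, 0]])       -> 2.0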
def remove_duplicates(uv_list, tm_list):
"""
Remove duplicates based on a, b, alpha matching.
"""
new_uv_list = []
new_tm_list = []
for sup_lat_n1, tm_n1 in zip(uv_list, tm_list):
a1 = [np.linalg.norm(i[0]) for i in sup_lat_n1]
b1 = [np.linalg.norm(i[1]) for i in sup_lat_n1]
angles = [get_angle(i[0], i[1]) for i in sup_lat_n1]
n1_lattices = [(a, b, alpha) for a, b, alpha in zip(a1, b1, angles)]
        # deduplicate on (a, b, alpha) rounded to one decimal place,
        # keeping the first occurrence of each distinct triple
        zround = np.array(n1_lattices).round(1)
        zstr = np.array([str(j) for j in zround.tolist()])
        zu, zind = np.unique(zstr, return_index=True)
        unq_sup_lat = [sup_lat_n1[i] for i in zind]
        unq_tm = [tm_n1[i] for i in zind]
new_uv_list.append(unq_sup_lat)
new_tm_list.append(unq_tm)
return new_uv_list, new_tm_list
def get_matching_lattices(iface1, iface2, max_area=100,
max_mismatch=0.01, max_angle_diff=1,
r1r2_tol=0.02, opt=False, best_match='area',
return_all_matches=None):
    """
    Computes a list of matching reduced lattice vectors that satisfy
    the max_area, max_mismatch and max_angle_diff criteria, and returns
    the best pair (uv_opt[0], uv_opt[1]).
    If return_all_matches is set to True, it overrides the best_match option.
    """
if iface1 is None and iface2 is None:
# test : the numbers from the paper
a1 = 5.653
a2 = 6.481
# for 100 plane
ab1 = [[0, a1 / 2, -a1 / 2], [0, a1 / 2, a1 / 2]]
ab2 = [[0, a2 / 2, -a2 / 2], [0, a2 / 2, a2 / 2]]
area1 = a1 ** 2 / 2
area2 = a2 ** 2 / 2
# for 110 plane
ab1 = [[a1 / 2, -a1 / 2, 0], [0, 0, a1]]
ab2 = [[a2 / 2, -a2 / 2, 0], [0, 0, a2]]
area1 = a1 ** 2 / sqrt(2)
area2 = a2 ** 2 / sqrt(2)
# for 111 surface
# ab1 = [ [a1/2, 0, a1/2], [a1/2, a1/2, 0]]
# ab2 = [ [a2/2, 0, a2/2], [a2/2, a2/2, 0]]
# area1 = a1**2 * sqrt(3)/4 #/ 2 /sqrt(2)
# area2 = a2**2 * sqrt(3)/4 #/ 2 / sqrt(2)
else:
area1 = surface_area(iface1)
area2 = surface_area(iface2)
# ab1 is list of two lattice vectors that define the substrate lattice
# ab2 is that of 2d material
ab1 = [iface1.lattice.matrix[0, :], iface1.lattice.matrix[1, :]]
ab2 = [iface2.lattice.matrix[0, :], iface2.lattice.matrix[1, :]]
#print('initial values:\nuv1:\n{0}\nuv2:\n{1}\n '.format(ab1, ab2))
r_list = get_r_list(area1, area2, max_area, tol=r1r2_tol)
if not r_list:
print('r_list is empty. Try increasing the max surface '
              'area and/or the other tolerance parameters')
return None, None
#sys.exit()
found = []
#print('searching ...')
uv1_list, tm1_list, uv2_list, tm2_list = [], [], [], []
for r1r2 in r_list:
x1, y1 = reduced_supercell_vectors(ab1, r1r2[0])
uv1_list.append(x1)
tm1_list.append(y1)
x2, y2 = reduced_supercell_vectors(ab2, r1r2[1])
uv2_list.append(x2)
tm2_list.append(y2)
if not uv1_list and not uv2_list:
continue
new_uv1, new_tm1 = remove_duplicates(uv1_list, tm1_list)
new_uv2, new_tm2 = remove_duplicates(uv2_list, tm2_list)
for sup_lat_n1, sup_lat_n2 in zip(new_uv1, new_uv2):
for i, uv1 in enumerate(sup_lat_n1):
for j, uv2 in enumerate(sup_lat_n2):
u_mismatch = get_mismatch(uv1[0], uv2[0])
v_mismatch = get_mismatch(uv1[1], uv2[1])
angle1 = get_angle(uv1[0], uv1[1])
angle2 = get_angle(uv2[0], uv2[1])
angle_mismatch = abs(angle1 - angle2)
area1 = get_area(uv1)
area2 = get_area(uv2)
if abs(u_mismatch) < max_mismatch and abs(
v_mismatch) < max_mismatch:
if angle_mismatch < max_angle_diff:
found.append((uv1, uv2, max(area1, area2), u_mismatch,
v_mismatch, angle_mismatch))
        # Since r_list is searched in increasing order of supercell size,
        # the lowest-area match is found first, so stop at the first hit
if found:
break # stop searching when first match is found
if found:
print('\nMATCH FOUND\n')
if best_match == 'area': # sort based on area and return lowest area
uv_all = sorted(found, key=lambda x: x[2])
elif best_match == 'mismatch': # sort based on average of uv mismatches
uv_all = sorted(found, key=lambda x: (abs(x[3]) + abs(x[4])) / 2)
if return_all_matches:
return uv_all
uv_opt = uv_all[0] # min. area match
print('Best match:\nuv1:\n{0}\nuv2:\n{1}\narea:\n{2}\n'.format(
uv_opt[0], uv_opt[1], uv_opt[2]))
        print('Lattice mismatch [u, v & alpha]:\n{0} %, {1} %, '
              '{2} degrees\n'.format(uv_opt[3]*100, uv_opt[4]*100, uv_opt[5]))
#print('\nSmallest area matched uv\n')
return uv_opt[0], uv_opt[1]
else:
print('\n NO MATCH FOUND\n')
return None, None
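# Self-test sketch: get_matching_lattices(None, None) falls back to the
# hard-coded a1/a2 test numbers from the Zur & McGill paper (the 110-plane
# vectors above) and returns the lowest-area matching pair (uv1, uv2).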
def get_uniq_layercoords(struct, nlayers, top=True):
"""
returns the coordinates of unique sites in the top or bottom
nlayers of the given structure.
Args:
struct: input structure
nlayers: number of layers
top: top or bottom layers, default is top layer
Return:
numpy array of unique coordinates
"""
coords = np.array([site.coords for site in struct])
z = coords[:, 2]
z = np.around(z, decimals=4)
zu, zuind = np.unique(z, return_index=True)
if top:
z_nthlayer = z[zuind[-nlayers]]
zfilter = (z >= z_nthlayer)
else:
z_nthlayer = z[zuind[nlayers - 1]]
zfilter = (z <= z_nthlayer)
# site indices in the layers
indices_layers = np.argwhere(zfilter).ravel()
sa = SpacegroupAnalyzer(struct)
symm_data = sa.get_symmetry_dataset()
# equivalency mapping for the structure
# i'th site in the struct equivalent to eq_struct[i]'th site
eq_struct = symm_data["equivalent_atoms"]
# equivalency mapping for the layers
eq_layers = eq_struct[indices_layers]
# site indices of unique atoms in the layers
__, ueq_layers_indices = np.unique(eq_layers, return_index=True)
# print(ueq_layers_indices)
indices_uniq = indices_layers[ueq_layers_indices]
# coordinates of the unique atoms in the layers
return coords[indices_uniq]
def get_interface(substrate, mat2d, nlayers_2d=2, nlayers_substrate=2,
separation=5):
"""
    For the given lattice-matched 2D material and substrate structures,
    this function computes the unique sites in the interface layers
    and builds a single 2d/substrate interface from the first pair of
    unique sites (one parallel shift)
Args:
mat2d: Lattice and symmetry-matched 2D material structure
substrate: Lattice and symmetry-matched 2D substrate structure
nlayers_substrate: number of substrate layers
nlayers_2d: number of 2d material layers
separation: separation between the substrate and the 2d
material
    Returns:
        the interface structure (2D material placed on the substrate)
TODO: give additional random placement of 2D material on substrate
"""
# immediate exit if no structures
if not (mat2d and substrate):
print("no structures. aborting lattice match ...")
return None
#sys.exit()
# unique site coordinates in the substrate top layers
coords_uniq_sub = get_uniq_layercoords(substrate,
nlayers_substrate,
top=True)
# unique site coordinates in the 2D material bottom layers
coords_uniq_2d = get_uniq_layercoords(mat2d,
nlayers_2d,
top=False)
substrate_top_z = np.max(np.array([site.coords
for site in substrate])[:, 2])
mat_2d_bottom = np.min(np.array([site.coords
for site in mat2d])[:, 2])
    # shift normal to the surface by 'separation'
surface_normal = substrate.lattice.matrix[2, :]
origin = np.array([0, 0, substrate_top_z])
shift_normal = surface_normal / np.linalg.norm(surface_normal) * separation
    # build the interface from the first combination of a unique substrate
    # site and a unique 2d material site in the layers, i.e. a single
    # parallel shift
    # interface = 2D material + substrate
interface = substrate.copy()
shift_parallel = coords_uniq_sub[0] - coords_uniq_2d[0]
shift_parallel[2] = 0
shift_net = shift_normal - shift_parallel
# generate new coords for 2D material to be added to substrate
new_coords = []
inds = []
mat_species = []
for ind, site in enumerate(mat2d):
new_coord = site.coords
new_coord[2] = site.coords[2] - mat_2d_bottom
new_coord = new_coord + origin + shift_net
new_coords.append(new_coord)
inds.append(ind)
mat_species.append(site.specie)
inds = np.array(inds) + len(substrate)
# insert mat2d coords and species at the end of the interface using index
# Not using Structure.append method as it seems to disrupt atoms order
for i, specie, coord in zip(inds, mat_species, new_coords):
interface.insert(i, specie, coord, coords_are_cartesian=True)
return interface
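# Hedged usage sketch (sub_slab and mat2d_slab are hypothetical, already
# lattice-matched pymatgen Structures, e.g. from get_aligned_lattices):
#   iface = get_interface(sub_slab, mat2d_slab, nlayers_2d=2,
#                         nlayers_substrate=2, separation=5)
#   iface.to(filename='POSCAR_interface', fmt='poscar')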
def get_aligned_lattices(sub, twod, max_area=200,
max_mismatch=0.05,
max_angle_diff=1, r1r2_tol=0.2, best_match='area',
uv_matched=None):
"""
    given the 2 slab structures and the alignment parameters, return
slab structures with lattices that are aligned with respect to each
other
"""
if not uv_matched:
# get the matching substrate and 2D material lattices
uv_substrate, uv_mat2d = get_matching_lattices(
sub, twod,
max_area=max_area,
max_mismatch=max_mismatch,
max_angle_diff=max_angle_diff,
r1r2_tol=r1r2_tol,
best_match=best_match)
if not uv_substrate and not uv_mat2d:
print("no matching u and v, trying adjusting the parameters")
return None, None
else:
try:
uv_substrate, uv_mat2d = uv_matched
        except (TypeError, ValueError):
print ('uv_matched should be a list or tuple '
'of (uv_substrate, uv_mat2d)')
return None, None
substrate = copy.deepcopy(sub)
mat2d = copy.deepcopy(twod)
    # map the initial slabs to the newly found matching lattices
substrate_latt = Lattice(np.array(
[
uv_substrate[0][:],
uv_substrate[1][:],
substrate.lattice.matrix[2, :]
]))
# to avoid numerical issues with find_mapping
mat2d_fake_c = mat2d.lattice.matrix[2, :] / np.linalg.norm(
mat2d.lattice.matrix[2, :]) * 5.0
mat2d_latt = Lattice(np.array(
[
uv_mat2d[0][:],
uv_mat2d[1][:],
mat2d_fake_c
]))
mat2d_latt_fake = Lattice(np.array(
[
mat2d.lattice.matrix[0, :],
mat2d.lattice.matrix[1, :],
mat2d_fake_c
]))
    # iterate over mappings until a non-singular supercell matrix is found
for res in substrate.lattice.find_all_mappings(substrate_latt, ltol=0.05,
atol=max_angle_diff):
scell_1 = res[2]
scell_1[2] = np.array([0, 0, 1])
if np.linalg.det(scell_1) < 1e-5:
continue
else:
break
    # iterate over mappings until a non-singular supercell matrix is found
for res in mat2d_latt_fake.find_all_mappings(mat2d_latt, ltol=0.05,
atol=max_angle_diff):
scell_2 = res[2]
scell_2[2] = np.array([0, 0, 1])
if np.linalg.det(scell_2) < 1e-5:
continue
else:
break
substrate.make_supercell(scell_1)
mat2d.make_supercell(scell_2)
# modify the substrate lattice
lmap = Lattice(np.array(
[
substrate.lattice.matrix[0, :],
substrate.lattice.matrix[1, :],
mat2d.lattice.matrix[2, :]
]))
mat2d.lattice = lmap
return substrate, mat2d
def get_all_aligned_lattices(sub, twod, max_area=200,
max_mismatch=0.05, max_angle_diff=1,
r1r2_tol=0.2, best_match='area'):
"""
Get uv_all
call get_aligned_lattices on each uv_all
return list of all_aligned_lattices
"""
uv_all = get_matching_lattices(sub, twod,
max_area=max_area,
max_mismatch=max_mismatch,
max_angle_diff=max_angle_diff,
r1r2_tol=r1r2_tol,
best_match=best_match,
return_all_matches=True)
print ('{} preliminary matched lattices found. '
'Aligning matched lattices..'.format(len(uv_all)))
all_aligned_lattices = []
lat_match_props = {}
for i in range(len(uv_all)):
uv = uv_all[i]
uv_matched = uv[0], uv[1]
try:
sub_lattice, mat2d_lattice = get_aligned_lattices(
sub, twod,
max_area=max_area,
max_mismatch=max_mismatch,
max_angle_diff=max_angle_diff,
r1r2_tol=r1r2_tol,
best_match=best_match,
uv_matched=uv_matched)
except:
continue
lat_match_props[i] = uv[2:]
all_aligned_lattices.append((sub_lattice, mat2d_lattice))
print ('{} lattice matches can be created'.format(
len(all_aligned_lattices)))
return all_aligned_lattices, lat_match_props
def rotate_to_principal_directions(cell):
"""
Author: Benjamin Revard
Rotates the cell into the principal directions. That is, lattice vector
a is parallel to the Cartesian x-axis, lattice vector b lies in the
Cartesian x-y plane and the z-component of lattice vector c is
positive.
Note: this method doesn't change the fractional coordinates of the
sites. However, the Cartesian coordinates may be changed.
"""
# rotate about the z-axis to align a vertically with the x-axis
rotation = RotationTransformation(
[0, 0, 1], 180 - (180/np.pi)*np.arctan2(
cell.lattice.matrix[0][1],
cell.lattice.matrix[0][0]))
new_structure = rotation.apply_transformation(cell)
cell.lattice = new_structure.lattice
# rotate about the y-axis to make a parallel to the x-axis
rotation = RotationTransformation(
[0, 1, 0], (180/np.pi)*np.arctan2(
cell.lattice.matrix[0][2],
cell.lattice.matrix[0][0]))
new_structure = rotation.apply_transformation(cell)
cell.lattice = new_structure.lattice
# rotate about the x-axis to make b lie in the x-y plane
rotation = RotationTransformation(
[1, 0, 0], 180 - (180/np.pi)*np.arctan2(
cell.lattice.matrix[1][2],
cell.lattice.matrix[1][1]))
new_structure = rotation.apply_transformation(cell)
cell.lattice = new_structure.lattice
# make sure they are all pointing in positive directions
if cell.lattice.matrix[0][0] < 0:
# rotate about y-axis to make a positive
rotation = RotationTransformation([0, 1, 0], 180)
new_structure = rotation.apply_transformation(cell)
cell.lattice = new_structure.lattice
if cell.lattice.matrix[1][1] < 0:
# rotate about x-axis to make b positive
rotation = RotationTransformation([1, 0, 0], 180)
new_structure = rotation.apply_transformation(cell)
cell.lattice = new_structure.lattice
if cell.lattice.matrix[2][2] < 0:
# mirror c across the x-y plane to make it positive
# a and b
a = cell.lattice.matrix[0]
b = cell.lattice.matrix[1]
# the components of c
cx = cell.lattice.matrix[2][0]
cy = cell.lattice.matrix[2][1]
cz = -1*cell.lattice.matrix[2][2]
cell.lattice = Lattice([a, b, [cx, cy, cz]])
def run_lat_match(substrate, twod_layer, match_constraints):
'''
    Runs the lattice matching algorithm on a substrate and a 2D material
Args:
substrate - substrate Cell
twod_layer - 2D layer Cell
match_constraints - dictionary containing max area, max mismatch of u
        or v, max angle difference, area ratio tolerance, separation at the
interface, number of layers for substrate and 2D. Ex: {'max_area':200,
'max_mismatch':0.05, 'max_angle_diff':2, 'r1r2_tol':0.06, 'separation':
3, 'nlayers_substrate':1, 'nlayers_2d':1, 'sd_layers':1}
'''
# variables from the keys
max_area = match_constraints['max_area']
max_mismatch = match_constraints['max_mismatch']
max_angle_diff = match_constraints['max_angle_diff']
r1r2_tol = match_constraints['r1r2_tol']
separation = match_constraints['separation']
nlayers_substrate = match_constraints['nlayers_substrate']
nlayers_2d = match_constraints['nlayers_2d']
sd_layers = match_constraints['sd_layers']
best_match = match_constraints['best_match']
twod_prim = twod_layer.get_primitive_structure()
substrate_prim = substrate.get_primitive_structure()
try:
#get aligned lattices
sub, mat2d = get_aligned_lattices(
substrate_prim,
twod_prim,
max_area=max_area,
max_mismatch=max_mismatch,
max_angle_diff=max_angle_diff,
r1r2_tol=r1r2_tol,
best_match=best_match)
rotate_to_principal_directions(sub)
rotate_to_principal_directions(mat2d)
# sorts atoms wrt electronegativity
# use this order in POTCAR
sub.sort()
mat2d.sort()
n_aligned_sub = sub.num_sites
except:
print ('Lattice match failed due to singular matrix generation for supercell')
return None, None, None
#merge substrate and mat2d in all possible ways
hetero_interface = None
if sub and mat2d:
try:
hetero_interface = get_interface(sub, mat2d,
nlayers_2d, nlayers_substrate,
separation)
except:
print('Lattice match failed at get_interface')
return None, None, None
z_coords_sub = sub.frac_coords[:, 2]
z_unique, z_inds = np.unique(z_coords_sub, return_index=True)
if sd_layers == 0: # freeze all substrate atoms
sd_index = n_aligned_sub - 1
else: # relax top layer of substrate atoms
sd_index = z_inds[len(z_inds)-sd_layers] - 1
if hetero_interface:
return hetero_interface, n_aligned_sub, sd_index
else:
return None, None, None
def get_all_matches(sub, twod, match_constraints, write_all=False):
"""
Returns all matches as a list of pymatgen structure objects
Writes all of them as POSCAR files in a directory 'all_interface_poscars'
"""
# variables from the keys
max_area = match_constraints['max_area']
max_mismatch = match_constraints['max_mismatch']
max_angle_diff = match_constraints['max_angle_diff']
r1r2_tol = match_constraints['r1r2_tol']
separation = match_constraints['separation']
nlayers_substrate = match_constraints['nlayers_substrate']
nlayers_2d = match_constraints['nlayers_2d']
best_match = match_constraints['best_match']
twod_prim = twod.get_primitive_structure()
substrate_prim = sub.get_primitive_structure()
all_aligned_lattices, lat_match_props = get_all_aligned_lattices(
substrate_prim, twod_prim,
max_area=max_area,
max_mismatch=max_mismatch,
max_angle_diff=max_angle_diff,
r1r2_tol=r1r2_tol,
best_match=best_match)
for i, each_set in enumerate(all_aligned_lattices):
print ('The abc of lattice {} is {}'.format(i,
each_set[0].lattice.abc))
all_matched_interfaces = []
for i in range(len(all_aligned_lattices)):
subs, mat2d = all_aligned_lattices[i]
rotate_to_principal_directions(subs)
rotate_to_principal_directions(mat2d)
# sorts atoms wrt electronegativity
# use this order in POTCAR
subs.sort()
mat2d.sort()
if subs and mat2d:
try:
hetero_interface = get_interface(subs, mat2d,
nlayers_2d, nlayers_substrate,
separation)
except:
continue
if hetero_interface:
all_matched_interfaces.append(hetero_interface)
else:
continue
if write_all:
current_path = os.getcwd()
poscar_path = current_path + '/all_interface_poscars/'
print ('Writing all interface structures to {}'.format(poscar_path))
if not os.path.exists(poscar_path):
os.mkdir(poscar_path)
for i, iface in enumerate(all_matched_interfaces):
strct = Structure(iface.lattice, iface.species, iface.frac_coords)
name = poscar_path + 'POSCAR_{}'.format(i)
strct.to(filename=name, fmt='poscar')
props_file = poscar_path + 'match_props.txt'
with open(props_file, 'w') as f:
lines = []
keys = list(lat_match_props.keys())
keys.sort()
for i, k in enumerate(keys):
props = lat_match_props[k]
line = '{0}\t{1:.2f}\t{2:.2f}\t\t{3:.2f}\t\t{4:.2f}\n'.format(
i, props[0], props[1]*100,
props[2]*100, props[3])
lines.append(line)
f.write('ID\tArea\t%strain-a\t%strain-b\tangle (deg)\n\n')
f.writelines(lines)
return all_matched_interfaces
| |
#!/usr/bin/python
# This module calculates ele and van der Waals interactions at points on a grid
"""
alchemicalGrids
This module provides a class that calculates ele and Lennard-Jones interactions
at points on a grid.
"""
import os, time, gzip
import numpy as np
class gridCalculation:
def __init__(self, \
prmtop_FN='apo.prmtop', inpcrd_FN=None, pqr_FN=None, \
header_FN=None, site_FN=None, \
PB_FN=None, ele_FN=None, LJa_FN=None, LJr_FN=None, \
spacing=None, counts=None):
### Parse parameters
self.FNs = {'prmtop':prmtop_FN, 'inpcrd':inpcrd_FN, 'header':header_FN, \
'pqr':{True:'receptor.pqr',False:pqr_FN}[pqr_FN is None], \
'site':{True:'../2-binding_site/measured_binding_site.py', \
False:site_FN}[site_FN is None], \
'PB':{True:'apbs.nc',False:PB_FN}[PB_FN is None], \
'ele':{True:'ele.nc',False:ele_FN}[ele_FN is None], \
'LJa':{True:'LJa.nc',False:LJa_FN}[LJa_FN is None], \
'LJr':{True:'LJr.nc',False:LJr_FN}[LJr_FN is None]}
del prmtop_FN, inpcrd_FN, header_FN, ele_FN, LJa_FN, LJr_FN
# Check that input files are available
for FN in [self.FNs['prmtop'],self.FNs['inpcrd']]:
if not os.path.exists(FN):
raise Exception(FN+' missing!')
# Check that output directories are available
for FN in [self.FNs['ele'],self.FNs['LJa'],self.FNs['LJr']]:
dirN = os.path.dirname(FN)
if dirN!='' and (not os.path.isdir(dirN)):
os.system('mkdir -p '+os.path.dirname(FN))
### Read header from dx file
header = {}
if (self.FNs['header'] is not None) and os.path.isfile(self.FNs['header']):
print 'Reading header from '+self.FNs['header']
headerF = open(self.FNs['header'],'r')
headerData = headerF.read()
headerF.close()
headerLines = headerData.split('\n')
counts = np.array([int(x) for x in headerLines.pop(0).split(' ')[-3:]])
for name in ['origin','d0','d1','d2']:
header[name] = [float(x) for x in headerLines.pop(0).split(' ')[-3:]]
spacing = np.array([header['d0'][0], header['d1'][1], header['d2'][2]])
del headerF, headerLines
# Read binding site parameters
if (self.FNs['site'] is not None) and os.path.isfile(self.FNs['site']):
print 'Reading binding site parameters from '+self.FNs['site']
F = open(self.FNs['site'],'r')
dat = F.read()
F.close()
dat = dat[dat.find('half_edge_length =')+18:]
dat = dat[:dat.find('\n')]
half_edge_length = float(dat.strip())
if (spacing is None):
print 'Using default spacing of 0.25 A'
spacing = np.array([0.25, 0.25, 0.25])
if (counts is None):
counts = np.array(np.ceil(np.array([ \
2.*half_edge_length/spacing[0], \
2.*half_edge_length/spacing[1], \
2.*half_edge_length/spacing[2]])),dtype=int)
# Loads coordinates
import AlGDock.IO
IO_crd = AlGDock.IO.crd()
self.crd = IO_crd.read(self.FNs['inpcrd'])
# Outputs files and parameters
print '*** Files and parameters ***'
print 'Input AMBER prmtop :\t' + self.FNs['prmtop']
print 'Input AMBER inpcrd :\t' + self.FNs['inpcrd']
if self.FNs['header'] is not None:
print 'Input grid header file :\t' + self.FNs['header']
if self.FNs['site'] is not None:
print 'Input binding site info :\t' + self.FNs['site']
print 'Output Poisson-Boltzmann:\t' + self.FNs['PB']
print 'Output electrostatics :\t' + self.FNs['ele']
print 'Output LJ attractive :\t' + self.FNs['LJa']
print 'Output LJ repulsive :\t' + self.FNs['LJr']
print 'Grid spacing :\t', spacing
print 'Grid counts :\t', counts
print
if not os.path.isfile(self.FNs['PB']):
print 'Calculating Poisson-Boltzmann grid'
self.PB_grid(spacing*counts)
else:
print 'Poisson-Boltzmann grid already calculated'
if not (os.path.isfile(self.FNs['ele']) and \
os.path.isfile(self.FNs['LJa']) and \
os.path.isfile(self.FNs['LJr'])):
print 'Calculating direct alchemical grids'
self.direct_grids(spacing, counts)
else:
print 'Direct alchemical grids already calculated'
def direct_grids(self, spacing, counts, no_ele=False):
"""
Calculates direct grids (Lennard Jones and electrostatic)
"""
# Loads a record from AMBER parameter file
def _loadRecord(record):
items = []
lines = record.split('\n')
lines.pop(0) # Name
FORMAT = lines.pop(0).strip()[8:-1] # Format
if FORMAT.find('a')>-1: # Text
w = int(FORMAT[FORMAT.find('a')+1:])
for line in lines:
items = items + [line[x:x+w] for x in range(0,len(line),w)]
elif FORMAT.find('I')>-1: # Integer
w = int(FORMAT[FORMAT.find('I')+1:])
for line in lines:
items = items + [int(line[x:x+w]) for x in range(0,len(line),w)]
elif FORMAT.find('E')>-1: # Scientific
w = int(FORMAT[FORMAT.find('E')+1:FORMAT.find('.')])
for line in lines:
items = items + [float(line[x:x+w]) for x in range(0,len(line),w)]
return np.array(items)
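    # Example of the FORMAT parsing above: for a record whose format line
    # is '%FORMAT(5E16.8)', FORMAT is '5E16.8', the 'E' branch is taken
    # with field width w = 16, and each data line is sliced into
    # 16-character floats; '10I8' and '20a4' records are handled
    # analogously by the integer and text branches.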
### Loads AMBER parameter file
prmtopF = open(self.FNs['prmtop'],'r')
prmtopData = prmtopF.read().split('%FLAG ')
prmtopF.close()
del prmtopF
varnames = ['POINTERS','CHARGE','NONBONDED_PARM_INDEX',
'LENNARD_JONES_ACOEF','LENNARD_JONES_BCOEF','ATOM_TYPE_INDEX']
prmtop = {}
for record in prmtopData:
name = record[:record.find('\n')].strip()
if name in varnames:
prmtop[name] = _loadRecord(record)
del name, record, varnames, prmtopData
prmtop['CHARGE'] = prmtop['CHARGE']/18.2223 # Convert to units of electric charge
NATOM = prmtop['POINTERS'][0]
NTYPES = prmtop['POINTERS'][1]
### Extract Lennard-Jones well depth and radii for each atom
LJ_radius = np.zeros(shape=(NTYPES), dtype=float)
LJ_depth = np.zeros(shape=(NTYPES), dtype=float)
for i in range(NTYPES):
LJ_index = prmtop['NONBONDED_PARM_INDEX'][NTYPES*i+i]-1
if prmtop['LENNARD_JONES_ACOEF'][LJ_index]<1.0e-6:
LJ_radius[i] = 0
LJ_depth[i] = 0
else:
factor = 2 * prmtop['LENNARD_JONES_ACOEF'][LJ_index] / prmtop['LENNARD_JONES_BCOEF'][LJ_index]
LJ_radius[i] = pow(factor, 1.0/6.0) * 0.5
LJ_depth[i] = prmtop['LENNARD_JONES_BCOEF'][LJ_index] / 2 / factor
# More useful for later calculations
root_LJ_depth = np.sqrt(LJ_depth)
LJ_diameter = LJ_radius*2
del i, LJ_index, factor
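    # Derivation note (AMBER convention assumed here): on the diagonal,
    # A = eps * Rmin^12 and B = 2 * eps * Rmin^6, so factor = 2A/B = Rmin^6,
    # giving LJ_radius = factor^(1/6)/2 = Rmin/2 and LJ_depth = B/(2*factor) = eps.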
### Coordinates of grid points
print 'Calculating grid coordinates'
startTime = time.time()
grid = {}
grid['x'] = np.zeros(shape=tuple(counts), dtype=float)
grid['y'] = np.zeros(shape=tuple(counts), dtype=float)
grid['z'] = np.zeros(shape=tuple(counts), dtype=float)
for i in range(counts[0]):
for j in range(counts[1]):
for k in range(counts[2]):
grid['x'][i,j,k] = i*spacing[0]
grid['y'][i,j,k] = j*spacing[1]
grid['z'][i,j,k] = k*spacing[2]
endTime = time.time()
print ' in %3.2f s'%(endTime-startTime)
### Calculate ele and Lennard-Jones potential energies at grid points
# Units: kcal/mol A e
# For ele potential:
# E = 1/(4*pi*eps_o) q1 q2 / r
#
# 1 kg m^2 | (1.60217646E-19 e)^2 | cal s^2 | 1 kcal | 1E10 A |
# --------------------------------|----------------------|--------------|----------|--------| * 6.0221415E+23
# 4 pi 8.854187817629E-12 C^2 s^2 | C^2 | 4.184 kg m^2 | 1000 cal | 1 m |
# Prefactor is:
# 1/(4*math.pi*8.85418781762E-12)*(1.60217646E-19**2)/4.184*1E10*6.0221415E+23/1000 = 332.06 kcal/mol A e^2
print 'Calculating grid potential energies'
startTime = time.time()
if not no_ele:
grid['ele'] = np.zeros(shape=tuple(counts), dtype=float)
grid['LJr'] = np.zeros(shape=tuple(counts), dtype=float)
grid['LJa'] = np.zeros(shape=tuple(counts), dtype=float)
for atom_index in range(NATOM):
dif_x = grid['x'] - self.crd[atom_index][0]
R2 = dif_x*dif_x
del dif_x
dif_y = grid['y'] - self.crd[atom_index][1]
R2 += dif_y*dif_y
del dif_y
dif_z = grid['z'] - self.crd[atom_index][2]
R2 += dif_z*dif_z
del dif_z
R = np.sqrt(R2)
del R2
atom_type = prmtop['ATOM_TYPE_INDEX'][atom_index]-1
if not no_ele:
grid['ele'] = grid['ele'] + 332.06*prmtop['CHARGE'][atom_index]/R
grid['LJr'] += root_LJ_depth[atom_type]*(LJ_diameter[atom_type]**6)/R**12
grid['LJa'] += -2*root_LJ_depth[atom_type]*(LJ_diameter[atom_type]**3)/R**6
if atom_index%100==0:
endTime = time.time()
print 'Completed atom %d / %d in a total of %3.2f s'%(atom_index,NATOM,endTime-startTime)
endTime = time.time()
print '\t%3.2f s'%(endTime-startTime)
# Cap Lennard-Jones potential energies
u_max = 10000.0
grid['LJr'] = u_max*np.tanh(grid['LJr']/u_max)
grid['LJa'] = u_max*np.tanh(grid['LJa']/u_max)
### Output grids
import AlGDock.IO
IO_Grid = AlGDock.IO.Grid()
print 'Writing grid output'
if not no_ele:
IO_Grid.write(self.FNs['ele'], \
{'origin':np.array([0., 0., 0.]), 'spacing':spacing, 'counts':counts, 'vals':grid['ele'].flatten()})
IO_Grid.write(self.FNs['LJr'], \
{'origin':np.array([0., 0., 0.]), 'spacing':spacing, 'counts':counts, 'vals':grid['LJr'].flatten()})
IO_Grid.write(self.FNs['LJa'], \
{'origin':np.array([0., 0., 0.]), 'spacing':spacing, 'counts':counts, 'vals':grid['LJa'].flatten()})
def PB_grid(self, edge_length):
"""
Calculates a Poisson-Boltzmann grid using APBS
edge_length is a 3 X 1 numpy array
"""
import inspect
import _external_paths
dirs = {}
# Sets up pqr file
if not os.path.exists(self.FNs['pqr']):
command_paths = _external_paths.findPaths(['sander'])
dirs['amber'] = os.path.abspath(\
os.path.dirname(command_paths['sander'])[:-4])
command = 'cat {0} | {1}/bin/ambpdb -p {2} -pqr > {3}'.format(\
self.FNs['inpcrd'],dirs['amber'],self.FNs['prmtop'],self.FNs['pqr'])
os.system(command)
# Determine the grid parameters
full_spacing = 1.0
focus_spacing = 0.5
final_spacing = focus_spacing
# The final grid spans the same space as the other grids
final_dims = np.array(np.ceil(edge_length/focus_spacing),dtype=int)
final_center = edge_length/2.
def roundUpDime(x):
return (np.ceil((x.astype(float)-1)/32)*32+1).astype(int)
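    # Rounds each grid dimension up to the next value of the form
    # 32*c + 1 (the dime convention assumed here for APBS mg-manual);
    # e.g. roundUpDime(np.array([97, 100, 130])) -> array([ 97, 129, 161])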
# The focus grid has the same center but the dimensions are rounded up
focus_dims = roundUpDime(final_dims)
focus_center = final_center
# The full grid spans 1.5 times the molecule range
# and the focus grid, whatever is larger
    # per-axis extrema over all atoms (columns of the N x 3 crd array)
    min_xyz = np.array([min(self.crd[:,a]) for a in range(3)])
    max_xyz = np.array([max(self.crd[:,a]) for a in range(3)])
mol_range = max_xyz - min_xyz
mol_center = (min_xyz + max_xyz)/2.
full_min = np.minimum(mol_center - mol_range/2.*1.5, \
focus_center - focus_dims*focus_spacing/2.*1.5)
full_max = np.maximum(mol_center + mol_range/2.*1.5, \
focus_center + focus_dims*focus_spacing/2.*1.5)
full_dims = roundUpDime((full_max-full_min)/full_spacing)
full_center = (full_min + full_max)/2.
    print 'These are the grid ranges:'
print 'Full'
print ' Min',full_min
print ' Max',full_max
print ' Center',full_center
print ' Spacing',full_spacing
print ' Points per dimension',full_dims
print 'Focus'
print ' Min',focus_center - (focus_dims-1)*focus_spacing/2.
print ' Max',focus_center + (focus_dims-1)*focus_spacing/2.
print ' Center',focus_center
print ' Spacing',focus_spacing
print ' Points per dimension',focus_dims
print 'Final'
print ' Min',final_center - (final_dims-1)*final_spacing/2.
print ' Max',final_center + (final_dims-1)*final_spacing/2.
print ' Center',final_center
print ' Spacing',final_spacing
print ' Points per dimension',final_dims
# Writes APBS script
apbsF = open('apbs.in','w')
apbsF.write('''READ
mol pqr {0}
END
ELEC mg-manual # large grid centered at center of range
bcfl mdh # multiple debye-huckel boundary condition
chgm spl4 # quintic B-spline charge discretization
dime {1[0]} {1[1]} {1[2]}
gcent {2[0]} {2[1]} {2[2]}
grid {3} {3} {3}
lpbe # Linearized Poisson-Boltzmann
mol 1
pdie 2.0
sdens 10.0
sdie 80.0
srad 1.4
srfm smol # Smoothed dielectric and ion-accessibility coefficients
swin 0.3
temp 300.0
END
ELEC mg-manual # focus grid around ligand binding site
  bcfl focus # focusing boundary condition, seeded by the coarse-grid solution
chgm spl4 # quintic B-spline charge discretization
dime {4[0]} {4[1]} {4[2]}
gcent {5[0]} {5[1]} {5[2]}
grid {6} {6} {6}
lpbe # Linearized Poisson-Boltzmann
mol 1
pdie 2.0
sdens 10.0
sdie 80.0
srad 1.4
srfm smol # Smoothed dielectric and ion-accessibility coefficients
swin 0.3
temp 300.0
write pot dx apbs_focus
END'''.format(self.FNs['pqr'], \
full_dims, full_center, full_spacing, \
focus_dims, focus_center, focus_spacing))
apbsF.close()
# Execute APBS
if not (os.path.isfile('apbs_focus.dx') or os.path.isfile(self.FNs['PB'])):
try:
command_paths = _external_paths.findPaths(['apbs'])
os.system(command_paths['apbs'] + ' apbs.in > apbs.out')
except:
print 'APBS failure!'
return
# Truncate the grid and convert to kcal/mol
# APBS reports electrostatic grid potential energies in kBT e_c^{-1}
# The others are in kcal/mol e_c^{-1}
# At 300 K, 1 kBT ~ 0.596 kcal/mol
if not os.path.isfile(self.FNs['PB']):
import AlGDock.IO
IO_Grid = AlGDock.IO.Grid()
print final_dims
IO_Grid.truncate('apbs_focus.dx', self.FNs['PB'], \
final_dims, multiplier=0.596)
# Remove intermediate files
for FN in [self.FNs['pqr'], 'io.mc', 'apbs.in', 'apbs.out']:
if os.path.isfile(FN):
os.remove(FN)
if __name__ == '__main__':
import sys
try:
import argparse
parser = argparse.ArgumentParser(description='Calculate van der Waals and ele grids')
parser.add_argument('--prmtop_FN', help='Input AMBER PRMTOP file')
parser.add_argument('--inpcrd_FN', help='Input coordinates')
parser.add_argument('--pqr_FN', help='Input for APBS (optional)')
parser.add_argument('--header_FN', help='Input grid header (optional)')
parser.add_argument('--site_FN', help='Input binding site parameters (optional)')
parser.add_argument('--PB_FN', help='Output for Poisson-Boltzmann grid')
parser.add_argument('--ele_FN', help='Output for electrostatic grid')
parser.add_argument('--LJa_FN', help='Output for attractive Lennard-Jones grid')
parser.add_argument('--LJr_FN', help='Output for repulsive Lennard-Jones grid')
parser.add_argument('--spacing', nargs=3, type=float, help='Grid spacing (overrides header)')
    parser.add_argument('--counts', nargs=3, type=int, help='Number of points in each direction (overrides header)')
args = parser.parse_args()
  except ImportError:
import optparse
parser = optparse.OptionParser()
parser.add_option('--prmtop_FN', help='Input AMBER PRMTOP file')
parser.add_option('--inpcrd_FN', help='Input coordinates')
parser.add_option('--pqr_FN', help='Input for APBS (optional)')
parser.add_option('--header_FN', help='Input grid header (optional)')
parser.add_option('--site_FN', help='Input binding site parameters (optional)')
parser.add_option('--PB_FN', help='Output for Poisson-Boltzmann grid')
parser.add_option('--ele_FN', help='Output for electrostatic grid')
parser.add_option('--LJa_FN', help='Output for attractive Lennard-Jones grid')
parser.add_option('--LJr_FN', help='Output for repulsive Lennard-Jones grid')
    parser.add_option('--spacing', nargs=3, type="float", help='Grid spacing (overrides header)')
    parser.add_option('--counts', nargs=3, type="int", help='Number of points in each direction (overrides header)')
(args,options) = parser.parse_args()
calc = gridCalculation(**vars(args))
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010-2011 OpenStack Foundation.
# Copyright 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import re
import sqlalchemy
from sqlalchemy import Boolean
from sqlalchemy import CheckConstraint
from sqlalchemy import Column
from sqlalchemy.engine import reflection
from sqlalchemy.ext.compiler import compiles
from sqlalchemy import func
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import or_
from sqlalchemy.sql.expression import literal_column
from sqlalchemy.sql.expression import UpdateBase
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy.types import NullType
from ceilometer.openstack.common import context as request_context
from ceilometer.openstack.common.db.sqlalchemy import models
from ceilometer.openstack.common.gettextutils import _, _LI, _LW
from ceilometer.openstack.common import timeutils
LOG = logging.getLogger(__name__)
_DBURL_REGEX = re.compile(r"[^:]+://([^:]+):([^@]+)@.+")
def sanitize_db_url(url):
match = _DBURL_REGEX.match(url)
if match:
return '%s****:****%s' % (url[:match.start(1)], url[match.end(2):])
return url
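# Example:
#   sanitize_db_url('mysql://user:secret@localhost/db')
#   returns 'mysql://****:****@localhost/db'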
class InvalidSortKey(Exception):
message = _("Sort key supplied was not valid.")
# copy from glance/db/sqlalchemy/api.py
def paginate_query(query, model, limit, sort_keys, marker=None,
sort_dir=None, sort_dirs=None):
"""Returns a query with sorting / pagination criteria added.
Pagination works by requiring a unique sort_key, specified by sort_keys.
(If sort_keys is not unique, then we risk looping through values.)
We use the last row in the previous page as the 'marker' for pagination.
So we must return values that follow the passed marker in the order.
With a single-valued sort_key, this would be easy: sort_key > X.
With a compound-values sort_key, (k1, k2, k3) we must do this to repeat
the lexicographical ordering:
(k1 > X1) or (k1 == X1 && k2 > X2) or (k1 == X1 && k2 == X2 && k3 > X3)
We also have to cope with different sort_directions.
Typically, the id of the last row is used as the client-facing pagination
marker, then the actual marker object must be fetched from the db and
passed in to us as marker.
:param query: the query object to which we should add paging/sorting
:param model: the ORM model class
:param limit: maximum number of items to return
:param sort_keys: array of attributes by which results should be sorted
    :param marker: the last item of the previous page; we return the next
                   results after this value.
:param sort_dir: direction in which results should be sorted (asc, desc)
:param sort_dirs: per-column array of sort_dirs, corresponding to sort_keys
:rtype: sqlalchemy.orm.query.Query
:return: The query with sorting/pagination added.
"""
if 'id' not in sort_keys:
# TODO(justinsb): If this ever gives a false-positive, check
        # the actual primary key, rather than assuming it is 'id'
LOG.warning(_LW('Id not in sort_keys; is sort_keys unique?'))
assert(not (sort_dir and sort_dirs))
# Default the sort direction to ascending
if sort_dirs is None and sort_dir is None:
sort_dir = 'asc'
# Ensure a per-column sort direction
if sort_dirs is None:
sort_dirs = [sort_dir for _sort_key in sort_keys]
assert(len(sort_dirs) == len(sort_keys))
# Add sorting
for current_sort_key, current_sort_dir in zip(sort_keys, sort_dirs):
try:
sort_dir_func = {
'asc': sqlalchemy.asc,
'desc': sqlalchemy.desc,
}[current_sort_dir]
except KeyError:
raise ValueError(_("Unknown sort direction, "
"must be 'desc' or 'asc'"))
try:
sort_key_attr = getattr(model, current_sort_key)
except AttributeError:
raise InvalidSortKey()
query = query.order_by(sort_dir_func(sort_key_attr))
# Add pagination
if marker is not None:
marker_values = []
for sort_key in sort_keys:
v = getattr(marker, sort_key)
marker_values.append(v)
# Build up an array of sort criteria as in the docstring
criteria_list = []
for i in range(len(sort_keys)):
crit_attrs = []
for j in range(i):
model_attr = getattr(model, sort_keys[j])
crit_attrs.append((model_attr == marker_values[j]))
model_attr = getattr(model, sort_keys[i])
if sort_dirs[i] == 'desc':
crit_attrs.append((model_attr < marker_values[i]))
else:
crit_attrs.append((model_attr > marker_values[i]))
criteria = sqlalchemy.sql.and_(*crit_attrs)
criteria_list.append(criteria)
f = sqlalchemy.sql.or_(*criteria_list)
query = query.filter(f)
if limit is not None:
query = query.limit(limit)
return query
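# Worked example of the pagination criteria built above: with
# sort_keys=['created_at', 'id'], sort_dirs=['asc', 'asc'] and marker row X,
# the filter added to the query is
#   (created_at > X.created_at) OR
#   (created_at = X.created_at AND id > X.id)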
def _read_deleted_filter(query, db_model, read_deleted):
if 'deleted' not in db_model.__table__.columns:
raise ValueError(_("There is no `deleted` column in `%s` table. "
"Project doesn't use soft-deleted feature.")
% db_model.__name__)
default_deleted_value = db_model.__table__.c.deleted.default.arg
if read_deleted == 'no':
query = query.filter(db_model.deleted == default_deleted_value)
elif read_deleted == 'yes':
pass # omit the filter to include deleted and active
elif read_deleted == 'only':
query = query.filter(db_model.deleted != default_deleted_value)
else:
raise ValueError(_("Unrecognized read_deleted value '%s'")
% read_deleted)
return query
def _project_filter(query, db_model, context, project_only):
if project_only and 'project_id' not in db_model.__table__.columns:
raise ValueError(_("There is no `project_id` column in `%s` table.")
% db_model.__name__)
if request_context.is_user_context(context) and project_only:
if project_only == 'allow_none':
is_none = None
query = query.filter(or_(db_model.project_id == context.project_id,
db_model.project_id == is_none))
else:
query = query.filter(db_model.project_id == context.project_id)
return query
def model_query(context, model, session, args=None, project_only=False,
read_deleted=None):
"""Query helper that accounts for context's `read_deleted` field.
:param context: context to query under
:param model: Model to query. Must be a subclass of ModelBase.
:type model: models.ModelBase
:param session: The session to use.
:type session: sqlalchemy.orm.session.Session
:param args: Arguments to query. If None - model is used.
:type args: tuple
:param project_only: If present and context is user-type, then restrict
query to match the context's project_id. If set to
'allow_none', restriction includes project_id = None.
:type project_only: bool
:param read_deleted: If present, overrides context's read_deleted field.
:type read_deleted: bool
Usage:
..code:: python
result = (utils.model_query(context, models.Instance, session=session)
.filter_by(uuid=instance_uuid)
.all())
query = utils.model_query(
context, Node,
session=session,
args=(func.count(Node.id), func.sum(Node.ram))
).filter_by(project_id=project_id)
"""
if not read_deleted:
if hasattr(context, 'read_deleted'):
# NOTE(viktors): some projects use `read_deleted` attribute in
# their contexts instead of `show_deleted`.
read_deleted = context.read_deleted
else:
read_deleted = context.show_deleted
if not issubclass(model, models.ModelBase):
raise TypeError(_("model should be a subclass of ModelBase"))
query = session.query(model) if not args else session.query(*args)
query = _read_deleted_filter(query, model, read_deleted)
query = _project_filter(query, model, context, project_only)
return query
def get_table(engine, name):
"""Returns an sqlalchemy table dynamically from db.
Needed because the models don't work for us in migrations
as models will be far out of sync with the current data.
"""
metadata = MetaData()
metadata.bind = engine
return Table(name, metadata, autoload=True)
class InsertFromSelect(UpdateBase):
"""Form the base for `INSERT INTO table (SELECT ... )` statement."""
def __init__(self, table, select):
self.table = table
self.select = select
@compiles(InsertFromSelect)
def visit_insert_from_select(element, compiler, **kw):
"""Form the `INSERT INTO table (SELECT ... )` statement."""
return "INSERT INTO %s %s" % (
compiler.process(element.table, asfrom=True),
compiler.process(element.select))
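# Usage sketch (mirrors how this module uses InsertFromSelect further down):
#   sel = sqlalchemy.sql.select([old_table.c.a, old_table.c.b])
#   migrate_engine.execute(InsertFromSelect(new_table, sel))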
class ColumnError(Exception):
"""Error raised when no column or an invalid column is found."""
def _get_not_supported_column(col_name_col_instance, column_name):
try:
column = col_name_col_instance[column_name]
except KeyError:
msg = _("Please specify column %s in col_name_col_instance "
"param. It is required because column has unsupported "
"type by sqlite).")
raise ColumnError(msg % column_name)
if not isinstance(column, Column):
msg = _("col_name_col_instance param has wrong type of "
"column instance for column %s It should be instance "
"of sqlalchemy.Column.")
raise ColumnError(msg % column_name)
return column
def drop_unique_constraint(migrate_engine, table_name, uc_name, *columns,
**col_name_col_instance):
"""Drop unique constraint from table.
DEPRECATED: this function is deprecated and will be removed from ceilometer.db
in a few releases. Please use UniqueConstraint.drop() method directly for
sqlalchemy-migrate migration scripts.
This method drops UC from table and works for mysql, postgresql and sqlite.
In mysql and postgresql we are able to use "alter table" construction.
Sqlalchemy doesn't support some sqlite column types and replaces their
type with NullType in metadata. We process these columns and replace
NullType with the correct column type.
:param migrate_engine: sqlalchemy engine
    :param table_name: name of the table that contains the unique constraint.
    :param uc_name: name of the unique constraint that will be dropped.
:param columns: columns that are in uniq constraint.
:param col_name_col_instance: contains pair column_name=column_instance.
column_instance is instance of Column. These params
are required only for columns that have unsupported
types by sqlite. For example BigInteger.
"""
from migrate.changeset import UniqueConstraint
meta = MetaData()
meta.bind = migrate_engine
t = Table(table_name, meta, autoload=True)
if migrate_engine.name == "sqlite":
override_cols = [
_get_not_supported_column(col_name_col_instance, col.name)
for col in t.columns
if isinstance(col.type, NullType)
]
for col in override_cols:
t.columns.replace(col)
uc = UniqueConstraint(*columns, table=t, name=uc_name)
uc.drop()
def drop_old_duplicate_entries_from_table(migrate_engine, table_name,
use_soft_delete, *uc_column_names):
"""Drop all old rows having the same values for columns in uc_columns.
    This method drops (or marks as `deleted` if use_soft_delete is True) old
    duplicate rows from the table with name `table_name`.
:param migrate_engine: Sqlalchemy engine
:param table_name: Table with duplicates
:param use_soft_delete: If True - values will be marked as `deleted`,
if False - values will be removed from table
:param uc_column_names: Unique constraint columns
"""
meta = MetaData()
meta.bind = migrate_engine
table = Table(table_name, meta, autoload=True)
columns_for_group_by = [table.c[name] for name in uc_column_names]
columns_for_select = [func.max(table.c.id)]
columns_for_select.extend(columns_for_group_by)
duplicated_rows_select = sqlalchemy.sql.select(
columns_for_select, group_by=columns_for_group_by,
having=func.count(table.c.id) > 1)
for row in migrate_engine.execute(duplicated_rows_select):
# NOTE(boris-42): Do not remove row that has the biggest ID.
delete_condition = table.c.id != row[0]
is_none = None # workaround for pyflakes
delete_condition &= table.c.deleted_at == is_none
for name in uc_column_names:
delete_condition &= table.c[name] == row[name]
rows_to_delete_select = sqlalchemy.sql.select(
[table.c.id]).where(delete_condition)
for row in migrate_engine.execute(rows_to_delete_select).fetchall():
LOG.info(_LI("Deleting duplicated row with id: %(id)s from table: "
"%(table)s") % dict(id=row[0], table=table_name))
if use_soft_delete:
delete_statement = table.update().\
where(delete_condition).\
values({
'deleted': literal_column('id'),
'updated_at': literal_column('updated_at'),
'deleted_at': timeutils.utcnow()
})
else:
delete_statement = table.delete().where(delete_condition)
migrate_engine.execute(delete_statement)
def _get_default_deleted_value(table):
if isinstance(table.c.id.type, Integer):
return 0
if isinstance(table.c.id.type, String):
return ""
raise ColumnError(_("Unsupported id columns type"))
def _restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes):
table = get_table(migrate_engine, table_name)
insp = reflection.Inspector.from_engine(migrate_engine)
real_indexes = insp.get_indexes(table_name)
existing_index_names = dict(
[(index['name'], index['column_names']) for index in real_indexes])
# NOTE(boris-42): Restore indexes on `deleted` column
for index in indexes:
if 'deleted' not in index['column_names']:
continue
name = index['name']
if name in existing_index_names:
column_names = [table.c[c] for c in existing_index_names[name]]
old_index = Index(name, *column_names, unique=index["unique"])
old_index.drop(migrate_engine)
column_names = [table.c[c] for c in index['column_names']]
new_index = Index(index["name"], *column_names, unique=index["unique"])
new_index.create(migrate_engine)
def change_deleted_column_type_to_boolean(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_boolean_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
old_deleted = Column('old_deleted', Boolean, default=False)
old_deleted.create(table, populate_default=False)
table.update().\
where(table.c.deleted == table.c.id).\
values(old_deleted=True).\
execute()
table.c.deleted.drop()
table.c.old_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_boolean_sqlite(migrate_engine, table_name,
**col_name_col_instance):
insp = reflection.Inspector.from_engine(migrate_engine)
table = get_table(migrate_engine, table_name)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', Boolean, default=0)
columns.append(column_copy)
constraints = [constraint.copy() for constraint in table.constraints]
meta = table.metadata
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
c_select = []
for c in table.c:
if c.name != "deleted":
c_select.append(c)
else:
c_select.append(table.c.deleted == table.c.id)
ins = InsertFromSelect(new_table, sqlalchemy.sql.select(c_select))
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
new_table.update().\
where(new_table.c.deleted == new_table.c.id).\
values(deleted=True).\
execute()
def change_deleted_column_type_to_id_type(migrate_engine, table_name,
**col_name_col_instance):
if migrate_engine.name == "sqlite":
return _change_deleted_column_type_to_id_type_sqlite(
migrate_engine, table_name, **col_name_col_instance)
insp = reflection.Inspector.from_engine(migrate_engine)
indexes = insp.get_indexes(table_name)
table = get_table(migrate_engine, table_name)
new_deleted = Column('new_deleted', table.c.id.type,
default=_get_default_deleted_value(table))
new_deleted.create(table, populate_default=True)
deleted = True # workaround for pyflakes
table.update().\
where(table.c.deleted == deleted).\
values(new_deleted=table.c.id).\
execute()
table.c.deleted.drop()
table.c.new_deleted.alter(name="deleted")
_restore_indexes_on_deleted_columns(migrate_engine, table_name, indexes)
def _change_deleted_column_type_to_id_type_sqlite(migrate_engine, table_name,
**col_name_col_instance):
    # NOTE(boris-42): sqlalchemy-migrate can't drop column with check
# constraints in sqlite DB and our `deleted` column has
# 2 check constraints. So there is only one way to remove
# these constraints:
# 1) Create new table with the same columns, constraints
# and indexes. (except deleted column).
# 2) Copy all data from old to new table.
# 3) Drop old table.
# 4) Rename new table to old table name.
insp = reflection.Inspector.from_engine(migrate_engine)
meta = MetaData(bind=migrate_engine)
table = Table(table_name, meta, autoload=True)
default_deleted_value = _get_default_deleted_value(table)
columns = []
for column in table.columns:
column_copy = None
if column.name != "deleted":
if isinstance(column.type, NullType):
column_copy = _get_not_supported_column(col_name_col_instance,
column.name)
else:
column_copy = column.copy()
else:
column_copy = Column('deleted', table.c.id.type,
default=default_deleted_value)
columns.append(column_copy)
def is_deleted_column_constraint(constraint):
        # NOTE(boris-42): There is no other way to check whether a
        #                 CheckConstraint is associated with the deleted column.
if not isinstance(constraint, CheckConstraint):
return False
sqltext = str(constraint.sqltext)
return (sqltext.endswith("deleted in (0, 1)") or
sqltext.endswith("deleted IN (:deleted_1, :deleted_2)"))
constraints = []
for constraint in table.constraints:
if not is_deleted_column_constraint(constraint):
constraints.append(constraint.copy())
new_table = Table(table_name + "__tmp__", meta,
*(columns + constraints))
new_table.create()
indexes = []
for index in insp.get_indexes(table_name):
column_names = [new_table.c[c] for c in index['column_names']]
indexes.append(Index(index["name"], *column_names,
unique=index["unique"]))
ins = InsertFromSelect(new_table, table.select())
migrate_engine.execute(ins)
table.drop()
[index.create(migrate_engine) for index in indexes]
new_table.rename(table_name)
deleted = True # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=new_table.c.id).\
execute()
# NOTE(boris-42): Fix value of deleted column: False -> "" or 0.
deleted = False # workaround for pyflakes
new_table.update().\
where(new_table.c.deleted == deleted).\
values(deleted=default_deleted_value).\
execute()
def get_connect_string(backend, database, user=None, passwd=None):
"""Get database connection
Try to get a connection with a very specific set of values, if we get
these then we'll run the tests, otherwise they are skipped
"""
args = {'backend': backend,
'user': user,
'passwd': passwd,
'database': database}
if backend == 'sqlite':
template = '%(backend)s:///%(database)s'
else:
template = "%(backend)s://%(user)s:%(passwd)s@localhost/%(database)s"
return template % args
def is_backend_avail(backend, database, user=None, passwd=None):
try:
connect_uri = get_connect_string(backend=backend,
database=database,
user=user,
passwd=passwd)
engine = sqlalchemy.create_engine(connect_uri)
connection = engine.connect()
except Exception:
# intentionally catch all to handle exceptions even if we don't
# have any backend code loaded.
return False
else:
connection.close()
engine.dispose()
return True
def get_db_connection_info(conn_pieces):
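    """Split a parsed connection URL (a urlparse result) into the tuple
    (user, password, database, host).
    """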
database = conn_pieces.path.strip('/')
loc_pieces = conn_pieces.netloc.split('@')
host = loc_pieces[1]
auth_pieces = loc_pieces[0].split(':')
user = auth_pieces[0]
password = ""
if len(auth_pieces) > 1:
password = auth_pieces[1].strip()
return (user, password, database, host)
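
# Example (a sketch with hypothetical credentials): skip backend-specific
# tests when a MySQL server is not reachable:
#
#     if not is_backend_avail('mysql', 'citest', user='citest', passwd='citest'):
#         self.skipTest('mysql backend is not available')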
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division
import filecmp
import os
from PIL import Image
import flask_fs as fs
from flask_fs.mongo import FileField, ImageField
from flask_mongoengine import MongoEngine
import pytest
db = MongoEngine()
class MongoEngineTestCase(object):
@pytest.fixture(autouse=True)
def storage(self, app, tmpdir):
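        # Use a per-test temp dir as the app instance path, register a
        # catch-all storage backend, and drop the Mongo test database on
        # teardown.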
app.instance_path = str(tmpdir)
storage = fs.Storage('test', fs.ALL)
fs.init_app(app, storage)
db.init_app(app)
yield storage
with app.test_request_context():
db_name = app.config['MONGODB_DB']
try:
db.connection.client.drop_database(db_name)
except TypeError:
db.connection.drop_database(db_name)
class FileFieldTest(MongoEngineTestCase):
def test_default_validate(self, storage):
class Tester(db.Document):
file = FileField(fs=storage)
tester = Tester()
assert tester.validate() is None
assert not tester.file
assert str(tester.file) == ''
assert tester.to_mongo() == {}
assert tester.file.filename is None
def test_set_filename(self, storage):
class Tester(db.Document):
file = FileField(fs=storage)
filename = 'file.test'
tester = Tester()
tester.file = filename
assert tester.validate() is None
assert tester.file
assert tester.file.filename == filename
assert tester.to_mongo() == {
'file': {
'filename': filename,
}
}
tester.save()
tester.reload()
assert tester.file.filename == filename
def test_save_from_file(self, storage, binfile):
class Tester(db.Document):
file = FileField(fs=storage)
filename = 'test.png'
tester = Tester()
        with open(binfile, 'rb') as f:
            tester.file.save(f, filename)
tester.validate()
assert tester.file
assert str(tester.file) == tester.file.url
assert tester.file.filename == filename
assert tester.to_mongo() == {
'file': {
'filename': filename,
}
}
assert filename in storage
assert filecmp.cmp(storage.path(filename), binfile)
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == filename
def test_save_from_filestorage(self, storage, utils):
class Tester(db.Document):
file = FileField(fs=storage)
filename = 'test.txt'
tester = Tester()
        tester.file.save(utils.filestorage(filename, 'this is a test'))
tester.validate()
assert tester.file
assert str(tester.file) == tester.file.url
assert tester.file.filename == filename
assert tester.to_mongo() == {
'file': {
'filename': filename,
}
}
assert filename in storage
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == filename
def test_save_with_upload_to(self, storage, utils):
upload_to = 'prefix'
class Tester(db.Document):
file = FileField(fs=storage, upload_to=upload_to)
filename = 'test.txt'
tester = Tester()
        tester.file.save(utils.filestorage(filename, 'this is a test'))
tester.validate()
expected_filename = '/'.join([upload_to, filename])
assert tester.file
assert tester.file.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'file': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == expected_filename
def test_save_with_callable_upload_to(self, storage, utils):
upload_to = 'prefix'
class Tester(db.Document):
file = FileField(fs=storage, upload_to=lambda o: upload_to)
filename = 'test.txt'
tester = Tester()
        tester.file.save(utils.filestorage(filename, 'this is a test'))
tester.validate()
expected_filename = '/'.join([upload_to, filename])
assert tester.file
assert tester.file.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'file': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == expected_filename
def test_save_with_callable_basename(self, storage, utils):
class Tester(db.Document):
file = FileField(fs=storage, basename=lambda o: 'prefix/filename')
filename = 'test.txt'
tester = Tester()
        tester.file.save(utils.filestorage(filename, 'this is a test'))
tester.validate()
expected_filename = 'prefix/filename.txt'
assert tester.file
assert tester.file.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'file': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == expected_filename
def test_save_with_callable_basename_override(self, storage, utils):
class Tester(db.Document):
file = FileField(fs=storage, basename=lambda o: 'prefix/filename')
filename = 'test.txt'
expected_filename = 'other.txt'
tester = Tester()
        tester.file.save(utils.filestorage(filename, 'this is a test'), expected_filename)
tester.validate()
assert tester.file
assert tester.file.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'file': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.file.filename == expected_filename
class ImageFieldTestMixin(MongoEngineTestCase):
@pytest.fixture
def resource(self, utils, image):
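        # Wrap the raw image fixture as an uploaded file named
        # 'flask.<ext>' using the shared test utils helper.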
return utils.filestorage('flask.{0}'.format(self.ext), image)
def test_default_validate(self, storage):
class Tester(db.Document):
image = ImageField(fs=storage)
tester = Tester()
assert tester.validate() is None
assert not tester.image
assert str(tester.image) == ''
assert tester.to_mongo() == {}
assert tester.image.filename is None
assert tester.image.original is None
def test_save_file(self, storage, image):
class Tester(db.Document):
image = ImageField(fs=storage)
filename = 'test.{0}'.format(self.ext)
tester = Tester()
tester.image.save(image, filename)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
with open(storage.path(filename), 'rb') as f_stored:
stored = Image.open(f_stored)
original = Image.open(image)
assert stored.size == original.size
def test_save_filestorage(self, storage, resource, image):
class Tester(db.Document):
image = ImageField(fs=storage)
filename = 'flask.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
with open(storage.path(filename), 'rb') as f_stored:
stored = Image.open(f_stored)
original = Image.open(image)
assert stored.size == original.size
def test_save_optimize_settings(self, app, storage, resource, image):
app.config['FS_IMAGES_OPTIMIZE'] = True
class Tester(db.Document):
image = ImageField(fs=storage)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'original': filename_original,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename_original
path_original = storage.path(filename_original)
path_optimized = storage.path(filename)
with open(path_original, 'rb') as f_orig:
with open(path_optimized, 'rb') as f_optimized:
source = Image.open(image)
original = Image.open(f_orig)
optimized = Image.open(f_optimized)
assert original.size == source.size
assert optimized.size == source.size
assert os.stat(path_optimized).st_size < os.stat(path_original).st_size
def test_save_optimize_attribute(self, app, storage, resource, image):
class Tester(db.Document):
image = ImageField(fs=storage, optimize=True)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'original': filename_original,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename_original
path_original = storage.path(filename_original)
path_optimized = storage.path(filename)
with open(path_original, 'rb') as f_orig:
with open(path_optimized, 'rb') as f_optimized:
source = Image.open(image)
original = Image.open(f_orig)
optimized = Image.open(f_optimized)
assert original.size == source.size
assert optimized.size == source.size
assert os.stat(path_optimized).st_size < os.stat(path_original).st_size
def test_save_max_size(self, storage, resource, image):
max_size = 150
class Tester(db.Document):
image = ImageField(fs=storage, max_size=max_size)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'original': filename_original,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename_original
with open(storage.path(filename_original), 'rb') as f_orig:
with open(storage.path(filename), 'rb') as f_resized:
source = Image.open(image)
original = Image.open(f_orig)
resized = Image.open(f_resized)
assert original.size == source.size
assert resized.size[0] <= max_size
assert resized.size[1] <= max_size
resized_ratio = resized.size[0] / resized.size[1]
source_ratio = source.size[0] / source.size[1]
assert resized_ratio == pytest.approx(source_ratio, 1)
def test_save_thumbnails(self, storage, image, resource):
sizes = [150, 32]
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
filename = 'flask.{0}'.format(self.ext)
filename_150 = 'flask-150.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
with pytest.raises(ValueError):
tester.image.thumbnail(200)
assert filename in storage
assert filename_32 in storage
assert filename_150 in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'thumbnails': {
'32': filename_32,
'150': filename_150,
},
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
with open(storage.path(filename), 'rb') as f_orig:
with open(storage.path(filename_32), 'rb') as f_32:
with open(storage.path(filename_150), 'rb') as f_150:
source = Image.open(image)
original = Image.open(f_orig)
thumb_32 = Image.open(f_32)
thumb_150 = Image.open(f_150)
assert original.size == source.size
assert thumb_32.size <= (32, 32)
assert thumb_150.size <= (150, 150)
def test_save_thumbnails_with_bbox(self, storage, resource, image):
sizes = [150, 32]
bbox = (10, 10, 100, 100)
filename = 'flask.{0}'.format(self.ext)
filename_150 = 'flask-150.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
tester = Tester()
tester.image.save(resource, bbox=bbox)
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
assert tester.image.bbox == bbox
with pytest.raises(ValueError):
tester.image.thumbnail(200)
assert filename in storage
assert filename_32 in storage
assert filename_150 in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'bbox': (10, 10, 100, 100),
'thumbnails': {
'32': filename_32,
'150': filename_150,
},
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
assert tuple(tester.image.bbox) == tuple(bbox)
with open(storage.path(filename), 'rb') as f_orig:
with open(storage.path(filename_32), 'rb') as f_32:
with open(storage.path(filename_150), 'rb') as f_150:
source = Image.open(image)
original = Image.open(f_orig)
thumb_32 = Image.open(f_32)
thumb_150 = Image.open(f_150)
assert original.size == source.size
assert thumb_32.size <= (32, 32)
assert thumb_150.size <= (150, 150)
    def test_save_with_two_fields(self, storage, resource):
sizes = [32]
bbox = (10, 10, 100, 100)
filename = 'flask.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
filename2 = 'flask2.{0}'.format(self.ext)
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
image2 = ImageField(fs=storage)
tester = Tester()
tester.image.save(resource, bbox=bbox)
tester.image2.save(resource, filename='flask2.{0}'.format(self.ext))
tester.validate()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.thumbnail(32) == filename_32
assert tuple(tester.image.bbox) == tuple(bbox)
assert tester.image2
assert str(tester.image2) == tester.image2.url
assert tester.image2.filename == filename2
assert tester.image2.bbox is None
assert filename in storage
assert filename_32 in storage
assert filename2 in storage
assert tester.to_mongo() == {
'image': {
'filename': filename,
'bbox': (10, 10, 100, 100),
'thumbnails': {
'32': filename_32,
},
},
'image2': {
'filename': filename2,
}
}
def test_save_and_update(self, storage, resource):
sizes = [150, 32]
bbox = (10, 10, 100, 100)
filename = 'flask.{0}'.format(self.ext)
filename_150 = 'flask-150.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
tester = Tester.objects.create()
tester.image.save(resource, bbox=bbox)
assert tester._changed_fields == ['image']
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == filename
assert tester.image.original == filename
assert tester.image.thumbnail(32) == filename_32
assert tester.image.thumbnail(150) == filename_150
assert tuple(tester.image.bbox) == tuple(bbox)
def test_best_match(self, storage, resource):
sizes = [150, 32]
filename_150 = 'flask-150.{0}'.format(self.ext)
filename_32 = 'flask-32.{0}'.format(self.ext)
filename2 = 'flask2.{0}'.format(self.ext)
class Tester(db.Document):
image = ImageField(fs=storage, thumbnails=sizes)
image2 = ImageField(fs=storage)
tester = Tester()
assert tester.image(150) is None
assert tester.image.best_url() is None
tester.image.save(resource)
tester.image2.save(resource, filename2)
assert tester.image.best_url(150) == storage.url(filename_150)
assert tester.image.best_url(140) == storage.url(filename_150)
assert tester.image.best_url(100) == storage.url(filename_150)
assert tester.image.best_url(32) == storage.url(filename_32)
assert tester.image.best_url(30) == storage.url(filename_32)
assert tester.image.best_url(160) == storage.url(filename_150)
assert tester.image.best_url() == storage.url(filename_150)
assert tester.image(150) == storage.url(filename_150)
assert tester.image(140) == storage.url(filename_150)
assert tester.image(160) == storage.url(filename_150)
assert tester.image2.best_url(150) == storage.url(filename2)
assert tester.image2.best_url() == storage.url(filename2)
def test_full(self, storage, resource):
max_size = 150
class Tester(db.Document):
image = ImageField(fs=storage, max_size=max_size)
filename = 'flask.{0}'.format(self.ext)
tester = Tester()
assert tester.image.full() is None
assert tester.image.full(external=True) is None
tester.image.save(resource)
assert tester.image.full() == storage.url(filename)
assert tester.image.full(external=True) == storage.url(filename, external=True)
def test_save_with_upload_to(self, storage, resource):
upload_to = 'prefix'
class Tester(db.Document):
image = ImageField(fs=storage, upload_to=upload_to)
filename = 'flask.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
expected_filename = '/'.join([upload_to, filename])
assert tester.image
assert tester.image.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'image': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == expected_filename
def test_save_with_callable_upload_to(self, storage, resource):
upload_to = 'prefix'
class Tester(db.Document):
image = ImageField(fs=storage, upload_to=lambda o: upload_to)
filename = 'flask.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource)
tester.validate()
expected_filename = '/'.join([upload_to, filename])
assert tester.image
assert tester.image.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'image': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == expected_filename
def test_save_with_callable_basename(self, storage, resource):
class Tester(db.Document):
image = ImageField(fs=storage, basename=lambda o: 'prefix/filename')
tester = Tester()
tester.image.save(resource)
tester.validate()
expected_filename = 'prefix/filename.{0}'.format(self.ext)
assert tester.image
assert tester.image.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'image': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == expected_filename
def test_save_with_callable_basename_override(self, storage, resource):
class Tester(db.Document):
image = ImageField(fs=storage, basename=lambda o: 'prefix/filename')
expected_filename = 'other.{0}'.format(self.ext)
tester = Tester()
tester.image.save(resource, expected_filename)
tester.validate()
assert tester.image
assert tester.image.filename == expected_filename
assert expected_filename in storage
assert tester.to_mongo() == {
'image': {
'filename': expected_filename,
}
}
tester.save()
tester = Tester.objects.get(id=tester.id)
assert tester.image.filename == expected_filename
def test_rerender(self, app, storage, resource, image):
class Tester(db.Document):
image = ImageField(fs=storage, optimize=True)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
storage.write(filename, image)
tester = Tester()
tester.image.filename = filename
assert tester.to_mongo() == {
'image': {
'filename': filename,
}
}
tester.image.rerender()
tester.save().reload()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'_id': tester.pk,
'image': {
'filename': filename,
'original': filename_original,
}
}
path_original = storage.path(filename_original)
path_optimized = storage.path(filename)
with open(path_original, 'rb') as f_orig:
with open(path_optimized, 'rb') as f_optimized:
source = Image.open(image)
original = Image.open(f_orig)
optimized = Image.open(f_optimized)
assert original.size == source.size
assert optimized.size == source.size
assert os.stat(path_optimized).st_size < os.stat(path_original).st_size
def test_rerender_multiple(self, app, storage, resource, image):
class Tester(db.Document):
image = ImageField(fs=storage, max_size=100, optimize=True)
filename = 'flask.{0}'.format(self.ext)
filename_original = 'flask-original.{0}'.format(self.ext)
storage.write(filename_original, image)
tester = Tester()
tester.image.original = filename_original
tester.image.filename = filename
assert tester.to_mongo() == {
'image': {
'original': filename_original,
'filename': filename,
}
}
tester.image.rerender()
tester.save().reload()
assert tester.image
assert str(tester.image) == tester.image.url
assert tester.image.filename == filename
assert tester.image.original == filename_original
assert filename in storage
assert tester.to_mongo() == {
'_id': tester.pk,
'image': {
'filename': filename,
'original': filename_original,
}
}
path_original = storage.path(filename_original)
path_optimized = storage.path(filename)
with open(path_original, 'rb') as f_orig:
with open(path_optimized, 'rb') as f_optimized:
source = Image.open(image)
original = Image.open(f_orig)
optimized = Image.open(f_optimized)
assert original.size == source.size
assert optimized.size[0] == 100
assert os.stat(path_optimized).st_size < os.stat(path_original).st_size
class ImageFieldPngTest(ImageFieldTestMixin):
ext = 'png'
@pytest.fixture
def image(self, pngfile):
with open(pngfile, 'rb') as f:
yield f
class ImageFieldJpgTest(ImageFieldTestMixin):
ext = 'jpg'
@pytest.fixture
def image(self, jpgfile):
with open(jpgfile, 'rb') as f:
yield f
| |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for TopK and Uniques sketch statistics generator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import pyarrow as pa
from tensorflow_data_validation import types
from tensorflow_data_validation.statistics.generators import top_k_uniques_sketch_stats_generator as sketch_generator
from tensorflow_data_validation.utils import test_util
from tensorflow_data_validation.utils.example_weight_map import ExampleWeightMap
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import schema_pb2
from tensorflow_metadata.proto.v0 import statistics_pb2
class TopKUniquesSketchStatsGeneratorTest(test_util.CombinerStatsGeneratorTest,
parameterized.TestCase):
"""Tests for TopKUniquesSketchStatsGenerator."""
def test_topk_uniques_sketch_with_single_bytes_feature(self):
# 'fa': 4 'a', 2 'b', 3 'c', 2 'd', 1 'e'
batches = [
pa.RecordBatch.from_arrays([
pa.array([['a', 'b', 'c', 'e'], ['a', 'c', 'd', 'a']],
type=pa.list_(pa.binary()))
], ['fa']),
pa.RecordBatch.from_arrays(
[pa.array([['a', 'b', 'c', 'd']], type=pa.list_(pa.binary()))],
['fa'])
]
# Note that if two feature values have the same frequency, the one with the
# lexicographically larger feature value will be higher in the order.
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: 'a'
frequency: 4
}
top_values {
value: 'c'
frequency: 3
}
top_values {
value: 'd'
frequency: 2
}
top_values {
value: 'b'
frequency: 2
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 4.0
}
buckets {
low_rank: 1
high_rank: 1
label: "c"
sample_count: 3.0
}
buckets {
low_rank: 2
high_rank: 2
label: "d"
sample_count: 2.0
}
}
}""", statistics_pb2.FeatureNameStatistics())
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_uniques_combiner_with_weights(self):
# non-weighted ordering
# fa: 3 'a', 2 'e', 2 'd', 2 'c', 1 'b'
# fb: 1 'v', 1 'w', 1 'x', 1 'y', 1 'z'
# weighted ordering
# fa: 20 'e', 20 'd', 15 'a', 10 'c', 5 'b'
# fb: 6 'z', 4 'x', 4 'y', 4 'w', 2 'v'
batches = [
pa.RecordBatch.from_arrays([
pa.array([['a', 'b', 'c', 'e'], ['a', 'c', 'd', 'a']]),
pa.array([['v'], ['w', 'x', 'y']]),
pa.array([[5.0], [5.0]]),
pa.array([[2.0], [4.0]]),
], ['fa', 'fb', 'w', 'w_b']),
pa.RecordBatch.from_arrays([
pa.array([['d', 'e']]),
pa.array([['z']]),
pa.array([[15.0]]),
pa.array([[6.0]]),
], ['fa', 'fb', 'w', 'w_b']),
]
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: 'a'
frequency: 3.0
}
top_values {
value: 'e'
frequency: 2.0
}
top_values {
value: 'd'
frequency: 2.0
}
top_values {
value: 'c'
frequency: 2.0
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 3.0
}
buckets {
low_rank: 1
high_rank: 1
label: "e"
sample_count: 2.0
}
buckets {
low_rank: 2
high_rank: 2
label: "d"
sample_count: 2.0
}
}
weighted_string_stats {
top_values {
value: 'e'
frequency: 20.0
}
top_values {
value: 'd'
frequency: 20.0
}
top_values {
value: 'a'
frequency: 15.0
}
top_values {
value: 'c'
frequency: 10.0
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "e"
sample_count: 20.0
}
buckets {
low_rank: 1
high_rank: 1
label: "d"
sample_count: 20.0
}
buckets {
low_rank: 2
high_rank: 2
label: "a"
sample_count: 15.0
}
}
}
}""", statistics_pb2.FeatureNameStatistics()),
types.FeaturePath(['fb']):
text_format.Parse(
"""
string_stats {
unique: 5
top_values {
value: "z"
frequency: 1.0
}
top_values {
value: "y"
frequency: 1.0
}
top_values {
value: "x"
frequency: 1.0
}
top_values {
value: "w"
frequency: 1.0
}
rank_histogram {
buckets {
label: "z"
sample_count: 1.0
}
buckets {
low_rank: 1
high_rank: 1
label: "y"
sample_count: 1.0
}
buckets {
low_rank: 2
high_rank: 2
label: "x"
sample_count: 1.0
}
}
weighted_string_stats {
top_values {
value: "z"
frequency: 6.0
}
top_values {
value: "y"
frequency: 4.0
}
top_values {
value: "x"
frequency: 4.0
}
top_values {
value: "w"
frequency: 4.0
}
rank_histogram {
buckets {
label: "z"
sample_count: 6.0
}
buckets {
low_rank: 1
high_rank: 1
label: "y"
sample_count: 4.0
}
buckets {
low_rank: 2
high_rank: 2
label: "x"
sample_count: 4.0
}
}
}
}
path {
step: "fb"
}""", statistics_pb2.FeatureNameStatistics()),
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
example_weight_map=ExampleWeightMap(
weight_feature='w',
per_feature_override={types.FeaturePath(['fb']): 'w_b'}),
num_top_values=4,
num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_uniques_sketch_with_single_unicode_feature(self):
# fa: 4 'a', 2 'b', 3 'c', 2 'd', 1 'e'
batches = [
pa.RecordBatch.from_arrays(
[pa.array([[u'a', u'b', u'c', u'e'], [u'a', u'c', u'd', u'a']])],
['fa']),
pa.RecordBatch.from_arrays([pa.array([[u'a', u'b', u'c', u'd']])],
['fa']),
]
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: 'a'
frequency: 4
}
top_values {
value: 'c'
frequency: 3
}
top_values {
value: 'd'
frequency: 2
}
top_values {
value: 'b'
frequency: 2
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 4.0
}
buckets {
low_rank: 1
high_rank: 1
label: "c"
sample_count: 3.0
}
buckets {
low_rank: 2
high_rank: 2
label: "d"
sample_count: 2.0
}
}
}""", statistics_pb2.FeatureNameStatistics())
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_uniques_sketch_with_multiple_features(self):
# fa: 4 'a', 2 'b', 3 'c', 2 'd', 1 'e'
# fb: 1 'a', 2 'b', 3 'c'
batches = [
pa.RecordBatch.from_arrays([
pa.array([['a', 'b', 'c', 'e'], None, ['a', 'c', 'd']]),
pa.array([['a', 'c', 'c'], ['b'], None]),
], ['fa', 'fb']),
pa.RecordBatch.from_arrays([
pa.array([['a', 'a', 'b', 'c', 'd'], None]),
pa.array([None, ['b', 'c']])
], ['fa', 'fb']),
]
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: 'a'
frequency: 4
}
top_values {
value: 'c'
frequency: 3
}
top_values {
value: 'd'
frequency: 2
}
top_values {
value: 'b'
frequency: 2
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 4.0
}
buckets {
low_rank: 1
high_rank: 1
label: "c"
sample_count: 3.0
}
buckets {
low_rank: 2
high_rank: 2
label: "d"
sample_count: 2.0
}
}
}""", statistics_pb2.FeatureNameStatistics()),
types.FeaturePath(['fb']):
text_format.Parse(
"""
path {
step: 'fb'
}
string_stats {
unique: 3
top_values {
value: 'c'
frequency: 3
}
top_values {
value: 'b'
frequency: 2
}
top_values {
value: 'a'
frequency: 1
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "c"
sample_count: 3.0
}
buckets {
low_rank: 1
high_rank: 1
label: "b"
sample_count: 2.0
}
buckets {
low_rank: 2
high_rank: 2
label: "a"
sample_count: 1.0
}
}
}""", statistics_pb2.FeatureNameStatistics())
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_uniques_sketch_zero_row(self):
batches = [
pa.RecordBatch.from_arrays([pa.array([], type=pa.list_(pa.binary()))],
['f1'])
]
expected_result = {}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_uniques_sketch_empty_record_batch(self):
batches = [pa.RecordBatch.from_arrays([], [])]
expected_result = {}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_uniques_sketch_with_missing_feature(self):
# fa: 4 'a', 2 'b', 3 'c', 2 'd', 1 'e'
# fb: 1 'a', 1 'b', 2 'c'
batches = [
pa.RecordBatch.from_arrays([
pa.array([['a', 'b', 'c', 'e'], None, ['a', 'c', 'd']]),
pa.array([['a', 'c', 'c'], ['b'], None]),
], ['fa', 'fb']),
pa.RecordBatch.from_arrays([
pa.array([['a', 'a', 'b', 'c', 'd'], None]),
], ['fa'])
]
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: 'a'
frequency: 4
}
top_values {
value: 'c'
frequency: 3
}
top_values {
value: 'd'
frequency: 2
}
top_values {
value: 'b'
frequency: 2
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 4.0
}
buckets {
low_rank: 1
high_rank: 1
label: "c"
sample_count: 3.0
}
buckets {
low_rank: 2
high_rank: 2
label: "d"
sample_count: 2.0
}
}
}""", statistics_pb2.FeatureNameStatistics()),
types.FeaturePath(['fb']):
text_format.Parse(
"""
path {
step: 'fb'
}
string_stats {
unique: 3
top_values {
value: 'c'
frequency: 2
}
top_values {
value: 'b'
frequency: 1
}
top_values {
value: 'a'
frequency: 1
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "c"
sample_count: 2.0
}
buckets {
low_rank: 1
high_rank: 1
label: "b"
sample_count: 1.0
}
buckets {
low_rank: 2
high_rank: 2
label: "a"
sample_count: 1.0
}
}
}""", statistics_pb2.FeatureNameStatistics())
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_uniques_sketch_with_numeric_feature(self):
# fa: 4 'a', 2 'b', 3 'c', 2 'd', 1 'e'
batches = [
pa.RecordBatch.from_arrays([
pa.array([['a', 'b', 'c', 'e'], None, ['a', 'c', 'd']]),
pa.array([[1.0, 2.0, 3.0], [4.0, 5.0], None]),
], ['fa', 'fb']),
pa.RecordBatch.from_arrays([
pa.array([['a', 'a', 'b', 'c', 'd']]),
pa.array([None], type=pa.list_(pa.float32())),
], ['fa', 'fb']),
]
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: 'a'
frequency: 4
}
top_values {
value: 'c'
frequency: 3
}
top_values {
value: 'd'
frequency: 2
}
top_values {
value: 'b'
frequency: 2
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 4.0
}
buckets {
low_rank: 1
high_rank: 1
label: "c"
sample_count: 3.0
}
buckets {
low_rank: 2
high_rank: 2
label: "d"
sample_count: 2.0
}
}
}""", statistics_pb2.FeatureNameStatistics())
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
@parameterized.named_parameters(
{
'testcase_name': 'int',
'is_float': False
}, {
'testcase_name': 'float',
'is_float': True
})
def test_topk_uniques_sketch_with_categorical_numeric_feature(
self, is_float):
# fa: 4 12, 2 23, 2 34, 2 45
def _map_nested_list(fn, val):
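      # Recursively apply fn to every scalar inside arbitrarily nested lists.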
if isinstance(val, list):
return list([_map_nested_list(fn, v) for v in val])
return fn(val)
data = [[[12, 23, 34, 12], [45, 23]], [[12, 12, 34, 45]]]
    if is_float:
data = _map_nested_list(float, data)
type_enum = 'FLOAT'
domain = 'float_domain'
else:
type_enum = 'INT'
domain = 'int_domain'
batches = [
pa.RecordBatch.from_arrays([pa.array(data[0])], ['fa']),
pa.RecordBatch.from_arrays([pa.array(data[1])], ['fa']),
]
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 4
top_values {
value: '12'
frequency: 4
}
top_values {
value: '45'
frequency: 2
}
top_values {
value: '34'
frequency: 2
}
top_values {
value: '23'
frequency: 2
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "12"
sample_count: 4.0
}
buckets {
low_rank: 1
high_rank: 1
label: "45"
sample_count: 2.0
}
buckets {
low_rank: 2
high_rank: 2
label: "34"
sample_count: 2.0
}
}
}""", statistics_pb2.FeatureNameStatistics())
}
schema = text_format.Parse(
"""
feature {
name: "fa"
type: %s
%s {
is_categorical: true
}
}
""" % (type_enum, domain), schema_pb2.Schema())
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
schema=schema, num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_with_frequency_threshold(self):
batches = [
pa.RecordBatch.from_arrays([
pa.array([['a', 'b', 'y', 'b']]),
pa.array([[5.0]]),
], ['fa', 'w']),
pa.RecordBatch.from_arrays([
pa.array([['a', 'x', 'a', 'z']]),
pa.array([[15.0]]),
], ['fa', 'w'])
]
expected_result = {
types.FeaturePath(['fa']): text_format.Parse("""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: 'a'
frequency: 3
}
top_values {
value: 'b'
frequency: 2
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 3.0
}
buckets {
low_rank: 1
high_rank: 1
label: "b"
sample_count: 2.0
}
}
weighted_string_stats {
top_values {
value: 'a'
frequency: 35.0
}
top_values {
value: 'z'
frequency: 15.0
}
top_values {
value: 'x'
frequency: 15.0
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 35.0
}
buckets {
low_rank: 1
high_rank: 1
label: "z"
sample_count: 15.0
}
buckets {
low_rank: 2
high_rank: 2
label: "x"
sample_count: 15.0
}
}
}
}""", statistics_pb2.FeatureNameStatistics())
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
example_weight_map=ExampleWeightMap(weight_feature='w'),
num_top_values=5, frequency_threshold=2,
weighted_frequency_threshold=15, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_struct_leaves(self):
batches = [
pa.RecordBatch.from_arrays([
pa.array([[1.0], [2.0]]),
pa.array([[{
'f1': ['a', 'b'],
'f2': [1, 2]
}, {
'f1': ['b'],
}], [{
'f1': ['c', 'd'],
'f2': [2, 3]
}, {
'f2': [3]
}]]),
], ['w', 'c']),
pa.RecordBatch.from_arrays([
pa.array([[3.0]]),
pa.array([[{
'f1': ['d'],
'f2': [4]
}]]),
], ['w', 'c']),
]
schema = text_format.Parse(
"""
feature {
name: "c"
type: STRUCT
struct_domain {
feature {
name: "f2"
type: INT
int_domain {
is_categorical: true
}
}
}
}
""", schema_pb2.Schema())
expected_result = {
types.FeaturePath(['c', 'f1']):
text_format.Parse("""
string_stats {
unique: 4
top_values {
value: "d"
frequency: 2.0
}
top_values {
value: "b"
frequency: 2.0
}
top_values {
value: "c"
frequency: 1.0
}
rank_histogram {
buckets {
label: "d"
sample_count: 2.0
}
buckets {
low_rank: 1
high_rank: 1
label: "b"
sample_count: 2.0
}
buckets {
low_rank: 2
high_rank: 2
label: "c"
sample_count: 1.0
}
}
weighted_string_stats {
top_values {
value: "d"
frequency: 5.0
}
top_values {
value: "c"
frequency: 2.0
}
top_values {
value: "b"
frequency: 2.0
}
rank_histogram {
buckets {
label: "d"
sample_count: 5.0
}
buckets {
low_rank: 1
high_rank: 1
label: "c"
sample_count: 2.0
}
buckets {
low_rank: 2
high_rank: 2
label: "b"
sample_count: 2.0
}
}
}
}
path {
step: "c"
step: "f1"
}""", statistics_pb2.FeatureNameStatistics()),
types.FeaturePath(['c', 'f2']):
text_format.Parse("""
string_stats {
unique: 4
top_values {
value: "3"
frequency: 2.0
}
top_values {
value: "2"
frequency: 2.0
}
top_values {
value: "4"
frequency: 1.0
}
rank_histogram {
buckets {
label: "3"
sample_count: 2.0
}
buckets {
low_rank: 1
high_rank: 1
label: "2"
sample_count: 2.0
}
buckets {
low_rank: 2
high_rank: 2
label: "4"
sample_count: 1.0
}
}
weighted_string_stats {
top_values {
value: "3"
frequency: 4.0
}
top_values {
value: "4"
frequency: 3.0
}
top_values {
value: "2"
frequency: 3.0
}
rank_histogram {
buckets {
label: "3"
sample_count: 4.0
}
buckets {
low_rank: 1
high_rank: 1
label: "4"
sample_count: 3.0
}
buckets {
low_rank: 2
high_rank: 2
label: "2"
sample_count: 3.0
}
}
}
}
path {
step: "c"
step: "f2"
}""", statistics_pb2.FeatureNameStatistics()),
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
schema=schema,
example_weight_map=ExampleWeightMap(weight_feature='w'),
num_top_values=3,
num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_uniques_sketch_with_int_weights(self):
# non-weighted ordering
# 3 'a', 2 'e', 2 'd', 2 'c', 1 'b'
# weighted ordering
# fa: 20 'e', 20 'd', 15 'a', 10 'c', 5 'b'
batches = [
pa.RecordBatch.from_arrays([
pa.array([['a', 'b', 'c', 'e'], ['a', 'c', 'd', 'a']],
type=pa.list_(pa.binary())),
pa.array([[5], [5]], type=pa.list_(pa.int32())),
], ['fa', 'w']),
pa.RecordBatch.from_arrays([
pa.array([['d', 'e']], type=pa.list_(pa.binary())),
pa.array([[15]], type=pa.list_(pa.int32())),
], ['fa', 'w']),
]
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: 'a'
frequency: 3.0
}
top_values {
value: 'e'
frequency: 2.0
}
top_values {
value: 'd'
frequency: 2.0
}
top_values {
value: 'c'
frequency: 2.0
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 3.0
}
buckets {
low_rank: 1
high_rank: 1
label: "e"
sample_count: 2.0
}
buckets {
low_rank: 2
high_rank: 2
label: "d"
sample_count: 2.0
}
}
weighted_string_stats {
top_values {
value: 'e'
frequency: 20.0
}
top_values {
value: 'd'
frequency: 20.0
}
top_values {
value: 'a'
frequency: 15.0
}
top_values {
value: 'c'
frequency: 10.0
}
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "e"
sample_count: 20.0
}
buckets {
low_rank: 1
high_rank: 1
label: "d"
sample_count: 20.0
}
buckets {
low_rank: 2
high_rank: 2
label: "a"
sample_count: 15.0
}
}
}
}""", statistics_pb2.FeatureNameStatistics())
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
example_weight_map=ExampleWeightMap(weight_feature='w'),
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_topk_uniques_sketch_with_weights_custom_stats(self):
# non-weighted ordering
# 3 'a', 2 'e', 2 'd', 2 'c', 1 'b'
# weighted ordering
# fa: 20 'e', 20 'd', 15 'a', 10 'c', 5 'b'
batches = [
pa.RecordBatch.from_arrays([
pa.array([['a', 'b', 'c', 'e'], ['a', 'c', 'd', 'a']]),
pa.array([[5.0], [5.0]]),
], ['fa', 'w']),
pa.RecordBatch.from_arrays([
pa.array([['d', 'e']]),
pa.array([[15.0]]),
], ['fa', 'w']),
]
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
custom_stats {
name: 'topk_sketch_rank_histogram'
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "a"
sample_count: 3.0
}
buckets {
low_rank: 1
high_rank: 1
label: "e"
sample_count: 2.0
}
buckets {
low_rank: 2
high_rank: 2
label: "d"
sample_count: 2.0
}
}
}
custom_stats {
name: 'weighted_topk_sketch_rank_histogram'
rank_histogram {
buckets {
low_rank: 0
high_rank: 0
label: "e"
sample_count: 20.0
}
buckets {
low_rank: 1
high_rank: 1
label: "d"
sample_count: 20.0
}
buckets {
low_rank: 2
high_rank: 2
label: "a"
sample_count: 15.0
}
}
}
custom_stats {
name: 'uniques_sketch_num_uniques'
num: 5
}""", statistics_pb2.FeatureNameStatistics())
}
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
example_weight_map=ExampleWeightMap(weight_feature='w'),
num_top_values=4, num_rank_histogram_buckets=3,
store_output_in_custom_stats=True)
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_schema_claims_categorical_int_but_actually_float(self):
schema = text_format.Parse("""
feature {
name: "a"
type: INT
int_domain { is_categorical: true }
}""", schema_pb2.Schema())
batches = [pa.RecordBatch.from_arrays([
pa.array([], type=pa.list_(pa.float32()))], ['a'])]
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
schema=schema,
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(
batches, generator, expected_feature_stats={})
def test_schema_claims_categorical_float_but_actually_int(self):
schema = text_format.Parse(
"""
feature {
name: "a"
type: FLOAT
float_domain { is_categorical: true }
}""", schema_pb2.Schema())
batches = [
pa.RecordBatch.from_arrays([pa.array([], type=pa.list_(pa.int64()))],
['a'])
]
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
schema=schema, num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(
batches, generator, expected_feature_stats={})
def test_schema_claimed_bytes(self):
schema = text_format.Parse("""
feature {
name: "a"
type: BYTES
# this makes the feature a bytes feature.
image_domain { }
}""", schema_pb2.Schema())
batches = [pa.RecordBatch.from_arrays([pa.array([[b'aaa']])], ['a'])]
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
schema=schema,
num_top_values=4, num_rank_histogram_buckets=3)
self.assertCombinerOutputEqual(
batches, generator, expected_feature_stats={})
def test_invalid_utf8_values(self):
    # 4 'a', 3 invalid utf8, 1 'b', 1 'c'
batches = [
pa.RecordBatch.from_arrays([
pa.array([[b'a', b'b', b'\x80', b'a'],
[b'a', b'\xC1', b'\x80', b'a']]),
], ['fa']),
pa.RecordBatch.from_arrays([
pa.array([['c']]),
], ['fa']),
]
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
num_top_values=4, num_rank_histogram_buckets=3)
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: "a"
frequency: 4.0
}
top_values {
value: "__BYTES_VALUE__"
frequency: 3.0
}
top_values {
value: "c"
frequency: 1.0
}
top_values {
value: "b"
frequency: 1.0
}
rank_histogram {
buckets {
label: "a"
sample_count: 4.0
}
buckets {
low_rank: 1
high_rank: 1
label: "__BYTES_VALUE__"
sample_count: 3.0
}
buckets {
low_rank: 2
high_rank: 2
label: "c"
sample_count: 1.0
}
}
}
""", statistics_pb2.FeatureNameStatistics())
}
self.assertCombinerOutputEqual(batches, generator, expected_result)
def test_large_bytes_values(self):
    # 4 'a', 3 large blob strings, 1 'b', 1 'c'
batches = [
pa.RecordBatch.from_arrays([
pa.array([[b'a', b'b', b'f' * 1025, b'a'],
[b'a', b'f' * 1025, b'f' * 1026, b'a']]),
], ['fa']),
pa.RecordBatch.from_arrays([
pa.array([['c']]),
], ['fa']),
]
generator = sketch_generator.TopKUniquesSketchStatsGenerator(
num_top_values=4, num_rank_histogram_buckets=3)
expected_result = {
types.FeaturePath(['fa']):
text_format.Parse(
"""
path {
step: 'fa'
}
string_stats {
unique: 5
top_values {
value: "a"
frequency: 4.0
}
top_values {
value: "__LARGE_BYTES__"
frequency: 3.0
}
top_values {
value: "c"
frequency: 1.0
}
top_values {
value: "b"
frequency: 1.0
}
rank_histogram {
buckets {
label: "a"
sample_count: 4.0
}
buckets {
low_rank: 1
high_rank: 1
label: "__LARGE_BYTES__"
sample_count: 3.0
}
buckets {
low_rank: 2
high_rank: 2
label: "c"
sample_count: 1.0
}
}
}
""",
statistics_pb2.FeatureNameStatistics())
}
self.assertCombinerOutputEqual(batches, generator, expected_result)
if __name__ == '__main__':
absltest.main()
| |
import datetime
import json
from datetime import date
from django.contrib import messages
from django.contrib.auth import views as authviews
from django.contrib.auth.tokens import default_token_generator
from django.contrib.auth import authenticate, get_user_model, login, update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.shortcuts import render
from django.urls import reverse
from django.views.generic import DetailView, TemplateView
from django.views.generic.edit import FormMixin
from whatachore.tasks import pw_email, user_to_worker
from wac.forms import PersonEditForm
from .forms import AccountSettingsForm, EmailLoginForm, EmailWorkerForm, RegistrationForm, PasswordResetRequestForm
from wac.views import PersonCreateView
from wac.models import Assignment, Person, Week
from .models import User
def my_password_reset(request):
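    # AJAX endpoint: build a password-reset token for the requested email
    # and hand delivery off to the pw_email background task, answering
    # with a JSON status payload.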
if request.is_ajax() and request.method == 'POST':
form = PasswordResetRequestForm(data=request.POST)
if form.is_valid():
email = form.cleaned_data['email']
user = User.objects.get(email=email)
token = default_token_generator.make_token(user)
pw_email.delay(email, token)
            messages.success(request, 'An email has been sent to ' + email + '. Please check your inbox to continue resetting your password.')
response_data = {}
response_data['status'] = 'success'
response_data['url'] = 'landing'
            return HttpResponse(json.dumps(response_data),
                                content_type='application/json')
        # invalid AJAX form: return the errors as JSON instead of falling
        # through and returning None
        return HttpResponseBadRequest(json.dumps(form.errors),
                                      content_type='application/json')
else:
form = PasswordResetRequestForm()
return render(request, 'registration/password_reset_form.html', {'form': form})
def register(request):
if request.method == 'POST':
form = RegistrationForm(data=request.POST)
if form.is_valid():
            new_user = get_user_model().objects.create_user(**form.cleaned_data)
login(request, new_user)
return HttpResponseRedirect('../welcome')
else:
return render(request, 'landing.html', {'form': form})
else:
form = RegistrationForm()
return HttpResponseRedirect(reverse('landing'))
class WelcomeOneView(PersonCreateView):
template_name = 'welcomeNew.html'
def get(self, request, *args, **kwargs):
theUser = request.user
form = PersonEditForm(initial={'email': theUser.email,})
return render(request, 'useraccounts/welcomeNew.html', {'form': form})
class WelcomeTwoView(TemplateView):
template_name = 'welcomeNew2.html'
class WelcomeLastView(TemplateView):
template_name = 'welcomeNewLast.html'
class HomeView(TemplateView):
model = User
template_name = 'home.html'
def get(self, request):
user = self.request.user
if user.is_authenticated():
return render(request, self.template_name, self.get_context_data())
else:
messages.warning(request, "Please log in or create an account.")
return HttpResponseRedirect(reverse('landing'))
    def profiled(self):
        person = Person.objects.filter(
            user=self.request.user
        ).filter(
            email__exact=self.request.user.email
        ).first()
        if person:
            return person.name
        return self.request.user.email
def get_context_data(self, **kwargs):
current_day = date.today()
context = super(HomeView, self).get_context_data(**kwargs)
context['people'] = Person.objects.filter(
user = self.request.user
)
context['assignments'] = Assignment.objects.filter(
week__user = self.request.user
).filter(
week__is_current = True
).filter(
when = current_day
)
context['not_done'] = Assignment.objects.filter(
week__user = self.request.user
).filter(
week__is_current = True
).filter(
when__lt=current_day
).filter(
done = False
).order_by("when")
context['old_weeks'] = Week.objects.filter(
user = self.request.user
).exclude(
is_current = True
)
return context
def email_to_worker(request):
template_name = 'email_worker_modal.html'
if request.is_ajax() and request.method == 'POST':
form = EmailWorkerForm(data=request.POST)
if form.is_valid():
# gather email pieces
recipient_list = form.cleaned_data['recipient_email']
subject = form.cleaned_data['subject']
message = form.cleaned_data['message']
# call email task
user_to_worker.delay(recipient_list, subject, message)
messages.success(request, "Your email is on its way.")
response_data = {}
response_data['status'] = 'success'
return HttpResponse(json.dumps(response_data), content_type="application/json")
else:
response_data = {}
response_data['status'] = 'fail'
return HttpResponseBadRequest(json.dumps(form.errors), content_type="application/json")
else:
recipient_list = ''
person_pk = request.GET.get('person')
if person_pk:
worker = Person.objects.get(pk=person_pk)
form = EmailWorkerForm()
if worker.email:
form = EmailWorkerForm(initial={'recipient_email': worker.email})
return render(request, template_name, {'form': form})
else:
messages.warning(request, "Looks like that worker doesn't have an email address on file.")
return render(request, template_name, {'form': form})
else:
messages.info(request, "Emails will only be sent to workers with an email address on file.")
# get all workers
workers = Person.objects.filter(
user = request.user
).exclude(
email = None
)
for worker in workers:
recipient_list += worker.email + ', '
# set recipient email field to list of all emails.
form = EmailWorkerForm(initial={'recipient_email': recipient_list})
return render(request, template_name, {'form': form})
class OldWeekView(DetailView):
model = Week
template_name = 'passed_week_modal.html'
def get_context_data(self, **kwargs):
context = super(OldWeekView, self).get_context_data(**kwargs)
context['assignments'] = Assignment.objects.filter(
week__pk=kwargs['object'].pk
).order_by("when")
start_date = kwargs['object'].start_date
context['dates'] = []
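        # the seven days covered by this week, starting at start_date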
        for i in range(7):
            context['dates'].append(start_date + datetime.timedelta(days=i))
return context
class AccountSettings(TemplateView):
model = User
template_name = 'account_settings.html'
def get_context_data(self, **kwargs):
context = super(AccountSettings, self).get_context_data(**kwargs)
context['user'] = self.request.user
return context
def get(self, request, *args, **kwargs):
user = self.request.user
form = AccountSettingsForm(initial={
'no_email': user.doNotEmail
})
return render(request, self.template_name, {'form': form, 'user': user})
def post(self, request, *args, **kwargs):
form = AccountSettingsForm(data=self.request.POST)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
user = self.request.user
no_email_value = form.cleaned_data['no_email']
user.doNotEmail = no_email_value
user.save(update_fields=['doNotEmail'])
return HttpResponse(reverse('home-view'))
class AjaxTemplateMixin(object):
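    # For AJAX requests, swap template_name for a derived "<name>_inner.html"
    # template (e.g. 'login.html' -> 'login_inner.html') unless an explicit
    # ajax_template_name is set on the view.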
def dispatch(self, request, *args, **kwargs):
if not hasattr(self, 'ajax_template_name'):
split = self.template_name.split('.html')
split[-1] = '_inner'
split.append('.html')
self.ajax_template_name = ''.join(split)
if request.is_ajax():
self.template_name = self.ajax_template_name
return super(AjaxTemplateMixin, self).dispatch(request, *args, **kwargs)
class EmailLoginView(AjaxTemplateMixin, TemplateView, authviews.LoginView):
template_name = 'useraccounts/registration/login.html'
form_class = EmailLoginForm
def dispatch(self, request, *args, **kwargs):
return super(EmailLoginView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'] = self.get_form()
return context
def get(self, request, *args, **kwargs):
form = EmailLoginForm(request)
return render(request, 'useraccounts/registration/login.html', context={'form': form})
def post(self, request, *args, **kwargs):
form = EmailLoginForm(request, data=self.request.POST)
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
login(self.request, form.get_user())
response_data = {}
response_data['status'] = 'success'
response_data['url'] = '/useraccounts/home'
response_data['email'] = self.request.user.email
return HttpResponse(json.dumps(response_data),
content_type='application/json')
def form_invalid(self, form):
response_data = {}
response_data['status'] = 'fail'
return HttpResponseBadRequest(json.dumps(form.errors), content_type='application/json')
def login_page(request):
if request.method == 'POST':
form = EmailLoginForm(request, data=request.POST)
if form.is_valid():
login(request, form.get_user())
response_data = {}
response_data['status'] = 'success'
response_data['url'] = '/useraccounts/home'
return HttpResponse(json.dumps(response_data),
content_type='application/json')
else:
response_data = {}
response_data['status'] = 'fail'
return HttpResponseBadRequest(json.dumps(form.errors), content_type='application/json')
else:
form = EmailLoginForm()
return render(request, 'useraccounts/login_page.html', context={'form': form})
class ChangePasswordView(FormMixin, AjaxTemplateMixin, TemplateView):
template_name = 'useraccounts/registration/password_change_form.html'
form_class = PasswordChangeForm
def dispatch(self, request, *args, **kwargs):
return super(ChangePasswordView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['form'] = self.get_form()
return context
def get(self, request, *args, **kwargs):
form = PasswordChangeForm(user=self.request.user)
return render(request, 'useraccounts/registration/password_change_form.html', context={'form': form})
def post(self, request, *args, **kwargs):
form = PasswordChangeForm(user=self.request.user, data=self.request.POST)
user = self.request.user
if form.is_valid():
return self.form_valid(user, form)
else:
return self.form_invalid(form)
def form_valid(self, user, form):
new_pass = form.cleaned_data['new_password1']
user.set_password(new_pass)
user.save()
update_session_auth_hash(self.request, user)
messages.success(self.request, "Your password was updated successfully!")
response_data = {}
response_data['status'] = 'success'
# response_data['messages'] = 'Your password was updated successfully'
return HttpResponse(json.dumps(response_data), content_type="application/json")
def form_invalid(self, form):
response_data = {}
response_data['status'] = 'fail'
return HttpResponseBadRequest(json.dumps(form.errors), content_type="application/json")
def change_password_done(request):
return HttpResponseRedirect(reverse('home-view'))
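
# For reference, a minimal sketch of how these views might be wired up in a
# hypothetical urls.py (paths and URL names below are assumptions for
# illustration, not taken from this project):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('login/', views.EmailLoginView.as_view(), name='login'),
#         path('login-page/', views.login_page, name='login-page'),
#         path('password/change/', views.ChangePasswordView.as_view(),
#              name='password-change'),
#         path('password/change/done/', views.change_password_done,
#              name='password-change-done'),
#     ]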
| |
# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
from math import sqrt
import os
import unittest
import inspyred
import pickle
from ordered_set import OrderedSet
from pandas.util.testing import assert_frame_equal
import six
from cameo import load_model, fba, config
from cameo.strain_design.heuristic.genomes import MultipleChromosomeGenome
from cameo.strain_design.heuristic.metrics import euclidean_distance
from cameo.strain_design.heuristic.metrics import manhattan_distance
from cameo.strain_design.heuristic.variators import _do_set_n_point_crossover, set_n_point_crossover, set_mutation, \
set_indel, multiple_chromosome_set_mutation, multiple_chromosome_set_indel
from cameo.util import RandomGenerator as Random
from cameo.strain_design.heuristic.optimization import HeuristicOptimization, ReactionKnockoutOptimization, \
set_distance_function, KnockoutOptimizationResult
from cameo.strain_design.heuristic.archivers import SolutionTuple, BestSolutionArchiver
from cameo.strain_design.heuristic.decoders import ReactionKnockoutDecoder, KnockoutDecoder, GeneKnockoutDecoder
from cameo.strain_design.heuristic.generators import set_generator, unique_set_generator, \
multiple_chromosome_set_generator
from cameo.strain_design.heuristic.objective_functions import biomass_product_coupled_yield, product_yield, \
number_of_knockouts
from cobra.manipulation.delete import find_gene_knockout_reactions
from cameo.parallel import SequentialView, MultiprocessingView
from six.moves import range
TRAVIS = os.getenv('TRAVIS', False)
SEED = 1234
CURRENT_PATH = os.path.dirname(__file__)
MODEL_PATH = os.path.join(CURRENT_PATH, "data/EcoliCore.xml")
TEST_MODEL = load_model(MODEL_PATH, sanitize=False)
SOLUTIONS = [
[[1, 2, 3], 0.1],
[[1, 3, 2, 4], 0.1],
[[2, 3, 4], 0.45],
[[62, 51, 4], 0.2],
[[5, 3, 4, 51], 0.9],
[[5, 23, 41, 51], 0.9],
[[5, 3, 4, 51, 31], 0.9],
[[5, 3, 4, 51], 0.9],
[[44, 12, 42, 51], 0.0],
[[52, 22, 4, 11], 0.0]
]
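# Each entry pairs a candidate (a list of knockout indices) with its fitness;
# the archiver tests below unpack these as SOLUTIONS[i][0] (candidate) and
# SOLUTIONS[i][1] (fitness).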
class TestMetrics(unittest.TestCase):
def test_euclidean_distance(self):
distance = euclidean_distance({'a': 9}, {'a': 3})
self.assertEqual(distance, sqrt((9-3)**2))
def test_manhattan_distance(self):
distance = manhattan_distance({'a': 9}, {'a': 3})
self.assertEqual(distance, abs(9-3))
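
# For orientation, minimal reference implementations consistent with the
# assertions above (illustrative sketches only; the actual cameo
# implementations may differ, and these helpers are not used by the tests):
def _reference_euclidean_distance(x, y):
    # Square root of the summed squared differences over the keys of x.
    return sqrt(sum((x[k] - y[k]) ** 2 for k in x))

def _reference_manhattan_distance(x, y):
    # Sum of absolute differences over the keys of x.
    return sum(abs(x[k] - y[k]) for k in x)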
class TestBestSolutionArchiver(unittest.TestCase):
def test_solution_string(self):
sol1 = SolutionTuple(SOLUTIONS[0][0], SOLUTIONS[0][1])
sol2 = SolutionTuple(SOLUTIONS[1][0], SOLUTIONS[1][1])
sol3 = SolutionTuple(SOLUTIONS[2][0], SOLUTIONS[2][1])
self.assertEqual(sol1.__str__(), "[1, 2, 3] - 0.1 sense: max")
self.assertEqual(sol2.__str__(), "[1, 2, 3, 4] - 0.1 sense: max")
self.assertEqual(sol3.__str__(), "[2, 3, 4] - 0.45 sense: max")
def test_solution_comparison_maximization(self):
sol1 = SolutionTuple(SOLUTIONS[0][0], SOLUTIONS[0][1])
sol2 = SolutionTuple(SOLUTIONS[1][0], SOLUTIONS[1][1])
sol3 = SolutionTuple(SOLUTIONS[2][0], SOLUTIONS[2][1])
# test ordering
self.assertEqual(sol1.__cmp__(sol2), -1)
self.assertEqual(sol1.__cmp__(sol1), 0)
self.assertEqual(sol1.__cmp__(sol3), 1)
self.assertTrue(sol1 < sol2)
self.assertTrue(sol1 == sol1)
self.assertTrue(sol1 > sol3)
# test gt and lt
self.assertTrue(sol1.__lt__(sol2))
self.assertTrue(sol1.__gt__(sol3))
self.assertFalse(sol1.__lt__(sol1))
self.assertFalse(sol1.__gt__(sol1))
self.assertFalse(sol2.__lt__(sol1))
self.assertFalse(sol3.__gt__(sol1))
# testing issubset
self.assertTrue(sol1.issubset(sol2), msg="Solution 1 is subset of Solution 2")
self.assertFalse(sol2.issubset(sol1), msg="Solution 2 is not subset of Solution 1")
self.assertTrue(sol3.issubset(sol2), msg="Solution 3 is subset of Solution 2")
self.assertFalse(sol2.issubset(sol3), msg="Solution 2 is not subset of Solution 3")
        self.assertFalse(sol1.issubset(sol3), msg="Solution 1 is not subset of Solution 3")
        self.assertFalse(sol3.issubset(sol1), msg="Solution 3 is not subset of Solution 1")
# test difference
        diff = len(sol2.symmetric_difference(sol1))
        self.assertEqual(diff, 1, msg="Difference between Solution 2 and 1 is (%s)" % sol2.symmetric_difference(sol1))
        diff = len(sol3.symmetric_difference(sol2))
        self.assertEqual(diff, 1, msg="Difference between Solution 3 and 2 is (%s)" % sol3.symmetric_difference(sol2))
        diff = len(sol3.symmetric_difference(sol1))
        self.assertEqual(diff, 2, msg="Difference between Solution 3 and 1 is (%s)" % sol3.symmetric_difference(sol1))
self.assertTrue(sol1.improves(sol2), msg="Solution 1 is better than Solution 2")
self.assertTrue(sol3.improves(sol2), msg="Solution 3 is better than Solution 2")
self.assertFalse(sol3.improves(sol1), msg="Solution 3 does not improve Solution 1")
self.assertFalse(sol2.improves(sol1), msg="Solution 2 does not improve Solution 1")
self.assertFalse(sol2.improves(sol3), msg="Solution 2 does not improve Solution 3")
def test_solution_comparison_minimization(self):
sol1 = SolutionTuple(SOLUTIONS[0][0], SOLUTIONS[0][1], maximize=False)
sol2 = SolutionTuple(SOLUTIONS[1][0], SOLUTIONS[1][1], maximize=False)
sol3 = SolutionTuple(SOLUTIONS[2][0], SOLUTIONS[2][1], maximize=False)
# test ordering
self.assertEqual(sol1.__cmp__(sol2), -1)
self.assertEqual(sol1.__cmp__(sol1), 0)
self.assertEqual(sol1.__cmp__(sol3), -1)
self.assertTrue(sol1 < sol2)
self.assertTrue(sol1 == sol1)
self.assertTrue(sol1 < sol3)
# test gt and lt
self.assertTrue(sol1.__lt__(sol2))
self.assertTrue(sol1.__lt__(sol3))
self.assertFalse(sol1.__gt__(sol1))
self.assertFalse(sol1.__lt__(sol1))
self.assertTrue(sol2.__gt__(sol1))
self.assertFalse(sol3.__lt__(sol1))
# testing issubset
self.assertTrue(sol1.issubset(sol2), msg="Solution 1 is subset of Solution 2")
self.assertFalse(sol2.issubset(sol1), msg="Solution 2 is not subset of Solution 1")
self.assertTrue(sol3.issubset(sol2), msg="Solution 3 is subset of Solution 2")
self.assertFalse(sol2.issubset(sol3), msg="Solution 2 is not subset of Solution 3")
        self.assertFalse(sol1.issubset(sol3), msg="Solution 1 is not subset of Solution 3")
        self.assertFalse(sol3.issubset(sol1), msg="Solution 3 is not subset of Solution 1")
# test difference
        diff = len(sol2.symmetric_difference(sol1))
        self.assertEqual(diff, 1, msg="Difference between Solution 2 and 1 is (%s)" % sol2.symmetric_difference(sol1))
        diff = len(sol3.symmetric_difference(sol2))
        self.assertEqual(diff, 1, msg="Difference between Solution 3 and 2 is (%s)" % sol3.symmetric_difference(sol2))
        diff = len(sol3.symmetric_difference(sol1))
        self.assertEqual(diff, 2, msg="Difference between Solution 3 and 1 is (%s)" % sol3.symmetric_difference(sol1))
self.assertTrue(sol1.improves(sol2), msg="Solution 1 is better than Solution 2")
self.assertFalse(sol3.improves(sol2), msg="Solution 3 is not better than Solution 2")
self.assertFalse(sol3.improves(sol1), msg="Solution 3 does not improve Solution 1")
self.assertFalse(sol2.improves(sol1), msg="Solution 2 does not improve Solution 1")
self.assertFalse(sol2.improves(sol3), msg="Solution 2 does not improve Solution 3")
def test_add_greater_solution_with_same_fitness(self):
size = 1
pool = BestSolutionArchiver()
pool.add(SOLUTIONS[0][0], SOLUTIONS[0][1], size)
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], size)
self.assertEqual(pool.length(), 1, msg="Pool must keep one solution (length=%s)" % pool.length())
best_solution = set(SOLUTIONS[0][0])
best_fitness = SOLUTIONS[0][1]
sol = pool.get(0)
self.assertEqual(sol.candidate, best_solution, msg="Best solution set must be the first")
self.assertEqual(sol.fitness, best_fitness, msg="Best solution fitness must be the first")
def test_add_smaller_solution_with_same_fitness(self):
size = 1
pool = BestSolutionArchiver()
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], size)
pool.add(SOLUTIONS[0][0], SOLUTIONS[0][1], size)
self.assertEqual(pool.length(), 1, msg="Pool must keep one solution (length=%s)" % pool.length())
solution = set(SOLUTIONS[0][0])
fitness = SOLUTIONS[0][1]
sol = pool.get(0)
self.assertEqual(sol.candidate, solution, msg="Best solution must be the first (%s)" % sol.candidate)
self.assertEqual(sol.fitness, fitness, msg="Best fitness must be the first (%s)" % sol.fitness)
def test_uniqueness_of_solutions(self):
size = 2
pool = BestSolutionArchiver()
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], size)
pool.add(SOLUTIONS[1][0], SOLUTIONS[1][1], size)
self.assertEqual(pool.length(), 1, "Added repeated solution")
    def test_pool_size_limit(self):
        # The archive must never grow beyond the requested size, whatever the
        # order in which candidate solutions are added.
        for size in range(1, 5):
            pool = BestSolutionArchiver()
            for candidate, fitness in SOLUTIONS:
                pool.add(candidate, fitness, size)
            self.assertLessEqual(pool.length(), size,
                                 msg="Pool must keep at most %s solutions (length=%s)" % (size, pool.length()))
def test_callable_pool(self):
pool = BestSolutionArchiver()
size = 3
args = {}
args.setdefault('max_archive_size', size)
population = [SolutionTuple(SOLUTIONS[0][0], SOLUTIONS[0][1]),
SolutionTuple(SOLUTIONS[1][0], SOLUTIONS[1][1]),
SolutionTuple(SOLUTIONS[2][0], SOLUTIONS[2][1]),
SolutionTuple(SOLUTIONS[3][0], SOLUTIONS[3][1]),
SolutionTuple(SOLUTIONS[4][0], SOLUTIONS[4][1]),
SolutionTuple(SOLUTIONS[5][0], SOLUTIONS[5][1]),
SolutionTuple(SOLUTIONS[6][0], SOLUTIONS[6][1])]
archive = pool(None, population, [], args)
self.assertEqual(pool.length(), size)
for sol in pool:
self.assertTrue(sol in archive)
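        # Note: the call above exercises inspyred's archiver contract, in
        # which an archiver is invoked as archiver(random, population,
        # archive, args) and returns the updated archive; BestSolutionArchiver
        # doubles as such a callable, reading max_archive_size from args.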
class TestObjectiveFunctions(unittest.TestCase):
class _MockupSolution():
def __init__(self):
self._primal = {}
def set_primal(self, k, v):
self._primal[k] = v
def get_primal_by_id(self, k):
return self._primal[k]
@property
def fluxes(self):
return self._primal
def test_biomass_product_coupled_yield(self):
solution = self._MockupSolution()
solution.set_primal('biomass', 0.6)
solution.set_primal('product', 2)
solution.set_primal('substrate', -10)
of = biomass_product_coupled_yield("biomass", "product", "substrate")
fitness = of(None, solution, None)
self.assertAlmostEqual((0.6 * 2) / 10, fitness)
solution.set_primal('substrate', 0)
fitness = of(None, solution, None)
        self.assertEqual(0, fitness)
def test_yield(self):
solution = self._MockupSolution()
solution.set_primal('biomass', 0.6)
solution.set_primal('product', 2)
solution.set_primal('substrate', -10)
of = product_yield("product", "substrate")
fitness = of(None, solution, None)
self.assertAlmostEqual(2.0 / 10.0, fitness)
solution.set_primal('substrate', 0)
fitness = of(None, solution, None)
        self.assertEqual(0, fitness)
def test_number_of_knockouts(self):
of_max = number_of_knockouts(sense='max')
of_min = number_of_knockouts(sense='min')
f1 = of_max(None, None, [['a', 'b'], ['a', 'b']])
f2 = of_max(None, None, [['a', 'b'], ['a', 'b', 'c']])
self.assertGreater(f2, f1)
f1 = of_min(None, None, [['a', 'b'], ['a', 'b']])
f2 = of_min(None, None, [['a', 'b'], ['a', 'b', 'c']])
self.assertGreater(f1, f2)
class TestDecoders(unittest.TestCase):
def setUp(self):
self.model = TEST_MODEL
def test_abstract_decoder(self):
decoder = KnockoutDecoder(None, self.model)
self.assertRaises(NotImplementedError, decoder, [])
def test_reaction_knockout_decoder(self):
decoder = ReactionKnockoutDecoder([r.id for r in self.model.reactions], self.model)
reactions1, reactions2 = decoder([1, 2, 3, 4])
self.assertTrue(sorted(reactions1, key=lambda x: x.id) == sorted(reactions2, key=lambda x: x.id))
def test_gene_knockout_decoder(self):
decoder = GeneKnockoutDecoder([g.id for g in self.model.genes], self.model)
reactions1, genes = decoder([1, 2, 3, 4])
reactions2 = find_gene_knockout_reactions(self.model, genes)
self.assertTrue(sorted(reactions1, key=lambda x: x.id) == sorted(reactions2, key=lambda x: x.id))
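
# The decoders above translate integer indices into identifiers from the
# representation and then into model objects. A minimal illustrative sketch
# of that mapping (an assumption for clarity, not cameo's actual code):
def _reference_decode_reactions(representation, model, individual):
    # Map each index to its reaction id, then to the cobra reaction object.
    ids = [representation[index] for index in individual]
    return [model.reactions.get_by_id(identifier) for identifier in ids]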
class TestGenerators(unittest.TestCase):
def setUp(self):
self.model = TEST_MODEL
self.args = {}
self.args.setdefault('representation', [r.id for r in self.model.reactions])
self.random = Random()
def test_set_generator(self):
random = Random(SEED)
representation = ["a", "b", "c", "d", "e", "f"]
candidate_size = 5
variable_candidate_size = False
expected = [[2, 1, 5, 0, 4],
[0, 4, 3, 2, 5],
[1, 0, 3, 2, 5],
[2, 3, 1, 4, 5],
[4, 5, 3, 0, 2]]
for i in range(len(expected)):
candidate = set_generator(random, dict(representation=representation,
candidate_size=candidate_size,
variable_candidate_size=variable_candidate_size))
self.assertEqual(candidate, expected[i])
    def test_multiple_chromosome_set_generator(self):
random = Random(SEED)
args = dict(keys=["test_key_1", "test_key_2"],
test_key_1_representation=["a1", "a2", "a3", "a4", "a5"],
test_key_2_representation=["b1", "b2", "b3", "b4", "b5", "b6", "b7"],
test_key_1_candidate_size=3,
test_key_2_candidate_size=5,
variable_candidate_size=False)
candidate = multiple_chromosome_set_generator(random, args)
self.assertEqual(len(candidate['test_key_1']), 3)
self.assertEqual(len(candidate['test_key_2']), 5)
def test_fixed_size_generator(self):
self.args.setdefault('variable_candidate_size', False)
self.args['candidate_size'] = 10
for _ in range(10000):
candidate = set_generator(self.random, self.args)
self.assertEqual(len(candidate), 10)
candidate = unique_set_generator(self.random, self.args)
self.assertEqual(len(candidate), 10)
self.args['candidate_size'] = 20
for _ in range(10000):
candidate = set_generator(self.random, self.args)
self.assertEqual(len(candidate), 20)
candidate = unique_set_generator(self.random, self.args)
self.assertEqual(len(candidate), 20)
def test_variable_size_generator(self):
self.args.setdefault('variable_candidate_size', True)
self.args['candidate_size'] = 10
for _ in range(10000):
candidate = set_generator(self.random, self.args)
self.assertLessEqual(len(candidate), 10)
candidate = unique_set_generator(self.random, self.args)
self.assertLessEqual(len(candidate), 10)
self.args['candidate_size'] = 20
for _ in range(10000):
candidate = set_generator(self.random, self.args)
self.assertLessEqual(len(candidate), 20)
candidate = unique_set_generator(self.random, self.args)
self.assertLessEqual(len(candidate), 20)
class TestHeuristicOptimization(unittest.TestCase):
def setUp(self):
self.model = TEST_MODEL
self.single_objective_function = product_yield('product', 'substrate')
self.multiobjective_function = [
product_yield('product', 'substrate'),
number_of_knockouts()
]
def test_default_initializer(self):
heuristic_optimization = HeuristicOptimization(
model=self.model,
objective_function=self.single_objective_function
)
self.assertIsNone(heuristic_optimization._generator)
        self.assertNotEqual(heuristic_optimization.seed, SEED)
self.assertEqual(heuristic_optimization.model, self.model)
self.assertEqual(heuristic_optimization.objective_function, self.single_objective_function)
heuristic_optimization = HeuristicOptimization(
model=self.model,
objective_function=self.single_objective_function,
seed=SEED
)
self.assertIsNone(heuristic_optimization._generator)
self.assertEqual(heuristic_optimization.seed, SEED)
self.assertEqual(heuristic_optimization.model, self.model)
self.assertEqual(heuristic_optimization.objective_function, self.single_objective_function)
def test_multiobjective_initializer(self):
heuristic_optimization = HeuristicOptimization(
model=self.model,
objective_function=self.multiobjective_function,
heuristic_method=inspyred.ec.emo.NSGA2
)
self.assertIsNone(heuristic_optimization._generator)
        self.assertNotEqual(heuristic_optimization.seed, SEED)
self.assertEqual(heuristic_optimization.model, self.model)
self.assertEqual(len(heuristic_optimization.objective_function), 2)
heuristic_optimization = HeuristicOptimization(
model=self.model,
objective_function=self.multiobjective_function,
heuristic_method=inspyred.ec.emo.NSGA2,
seed=SEED
)
self.assertIsNone(heuristic_optimization._generator)
self.assertEqual(heuristic_optimization.seed, SEED)
self.assertEqual(heuristic_optimization.model, self.model)
self.assertEqual(len(heuristic_optimization.objective_function), 2)
def test_invalid_initializer(self):
self.assertRaises(TypeError, HeuristicOptimization,
model=self.model,
objective_function=self.multiobjective_function,
heuristic_method=inspyred.ec.GA)
def test_single_objective_function_with_multiobjective_initializer(self):
heuristic_optimization = HeuristicOptimization(
model=self.model,
objective_function=self.single_objective_function,
heuristic_method=inspyred.ec.emo.NSGA2
)
self.assertEqual(len(heuristic_optimization.objective_function), 1)
def test_change_objective_function(self):
single_objective_heuristic = HeuristicOptimization(
model=self.model,
objective_function=self.single_objective_function,
)
nok = number_of_knockouts()
single_objective_heuristic.objective_function = nok
self.assertEqual(nok, single_objective_heuristic.objective_function)
self.assertFalse(single_objective_heuristic.is_mo())
self.assertRaises(TypeError,
single_objective_heuristic.objective_function,
self.multiobjective_function)
single_objective_heuristic.objective_function = [nok]
self.assertEqual(nok, single_objective_heuristic.objective_function)
self.assertFalse(single_objective_heuristic.is_mo())
self.assertRaises(TypeError, single_objective_heuristic.objective_function, self.multiobjective_function)
multiobjective_heuristic = HeuristicOptimization(
model=self.model,
objective_function=self.multiobjective_function,
heuristic_method=inspyred.ec.emo.NSGA2
)
multiobjective_heuristic.objective_function = nok
self.assertEqual(len(multiobjective_heuristic.objective_function), 1)
self.assertEqual(multiobjective_heuristic.objective_function[0], nok)
self.assertTrue(multiobjective_heuristic.is_mo())
def test_change_heuristic_method(self):
single_objective_heuristic = HeuristicOptimization(
model=self.model,
objective_function=self.single_objective_function,
)
single_objective_heuristic.heuristic_method = inspyred.ec.emo.NSGA2
self.assertTrue(single_objective_heuristic.is_mo())
self.assertEqual(len(single_objective_heuristic.objective_function), 1)
multiobjective_heuristic = HeuristicOptimization(
model=self.model,
objective_function=self.multiobjective_function,
heuristic_method=inspyred.ec.emo.NSGA2
)
self.assertRaises(TypeError, multiobjective_heuristic.heuristic_method, inspyred.ec.GA)
multiobjective_heuristic.objective_function = self.single_objective_function
multiobjective_heuristic.heuristic_method = inspyred.ec.GA
self.assertFalse(multiobjective_heuristic.is_mo())
def test_set_distance_function(self):
s1 = {1, 2, 3}
s2 = {1, 2, 3, 4}
d = set_distance_function(s1, s2)
self.assertEqual(d, 1)
s3 = {2, 3, 4}
d = set_distance_function(s1, s3)
self.assertEqual(d, 2)
d = set_distance_function(s3, s2)
self.assertEqual(d, 1)
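
# The assertions above are consistent with set_distance_function being the
# size of the symmetric difference; a reference sketch (illustrative only,
# not necessarily cameo's implementation):
def _reference_set_distance(s1, s2):
    return len(set(s1) ^ set(s2))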
class TestKnockoutOptimizationResult(unittest.TestCase):
def setUp(self):
self.model = TEST_MODEL
self.representation = [r.id for r in self.model.reactions]
random = Random(SEED)
args = {"representation": self.representation}
self.solutions = BestSolutionArchiver()
for _ in range(10000):
self.solutions.add(set_generator(random, args), random.random(), 100)
self.decoder = ReactionKnockoutDecoder(self.representation, self.model)
def test_result(self):
result = KnockoutOptimizationResult(
model=self.model,
heuristic_method=None,
simulation_method=fba,
solutions=self.solutions,
objective_function=None,
ko_type="reaction",
decoder=self.decoder,
product="EX_ac_LPAREN_e_RPAREN_",
biomass="Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2",
seed=SEED,
reference=None)
self.assertEqual(result.ko_type, "reaction")
individuals = []
for index, row in result.solutions.iterrows():
individual = SolutionTuple(set(self.representation.index(r) for r in row["Knockouts"]), row["Fitness"])
            self.assertNotIn(individual, individuals, msg="%s is repeated on result" % individual)
individuals.append(individual)
self.assertIn(individual, self.solutions.archive)
self.assertEqual(len(row["Knockouts"]), row["Size"])
            self.assertEqual(self.solutions.archive.count(individual), 1, msg="%s must appear exactly once in archive" % individual)
class TestReactionKnockoutOptimization(unittest.TestCase):
def setUp(self):
self.model = TEST_MODEL
        self.essential_reactions = {r.id for r in self.model.essential_reactions()}
def test_initialize(self):
rko = ReactionKnockoutOptimization(model=self.model,
simulation_method=fba,
seed=SEED)
self.assertTrue(sorted(self.essential_reactions) == sorted(rko.essential_reactions))
self.assertEqual(rko._ko_type, "reaction")
self.assertTrue(isinstance(rko._decoder, ReactionKnockoutDecoder))
@unittest.skipIf(True, 'Broken ..')
def test_run_single_objective(self):
result_file = os.path.join(CURRENT_PATH, "data", "reaction_knockout_single_objective.pkl")
objective = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2",
"EX_ac_LPAREN_e_RPAREN_",
"EX_glc_LPAREN_e_RPAREN_")
rko = ReactionKnockoutOptimization(model=self.model,
simulation_method=fba,
objective_function=objective,
seed=SEED)
# self.assertEqual(rko.random.random(), 0.1915194503788923)
results = rko.run(max_evaluations=3000, pop_size=10, view=SequentialView())
# self.assertEqual(rko.random.random(), 0.9268454219291495)
with open(result_file, 'rb') as in_file:
if six.PY3:
expected_results = pickle.load(in_file, encoding="latin1")
else:
expected_results = pickle.load(in_file)
assert_frame_equal(results.solutions, expected_results.solutions)
@unittest.skipIf(True, 'Broken ..')
def test_run_multiobjective(self):
result_file = os.path.join(CURRENT_PATH, "data", "reaction_knockout_multi_objective.pkl")
objective1 = biomass_product_coupled_yield(
"Biomass_Ecoli_core_N_LPAREN_w_FSLASH_GAM_RPAREN__Nmet2",
"EX_ac_LPAREN_e_RPAREN_",
"EX_glc_LPAREN_e_RPAREN_")
objective2 = number_of_knockouts()
objective = [objective1, objective2]
rko = ReactionKnockoutOptimization(model=self.model,
simulation_method=fba,
objective_function=objective,
heuristic_method=inspyred.ec.emo.NSGA2,
seed=SEED)
# self.assertEqual(rko.random.random(), 0.1915194503788923)
results = rko.run(max_evaluations=3000, pop_size=10, view=SequentialView())
# print(rko.random.random(), 0.545818634701)
with open(result_file, 'rb') as in_file:
if six.PY3:
expected_results = pickle.load(in_file, encoding="latin1")
else:
expected_results = pickle.load(in_file)
assert_frame_equal(results.solutions, expected_results.solutions)
def test_evaluator(self):
pass
class VariatorsTestCase(unittest.TestCase):
def test_set_n_point_crossover(self):
mom = OrderedSet([1, 3, 5, 9, 10])
dad = OrderedSet([2, 3, 7, 8])
args = {
"crossover_rate": 1.0,
"num_crossover_points": 1,
"candidate_size": 10
}
children = set_n_point_crossover(Random(SEED), [mom, dad], args)
bro = OrderedSet([1, 3, 5, 8])
sis = OrderedSet([2, 3, 7, 9, 10])
self.assertEqual(bro, children[0])
self.assertEqual(sis, children[1])
def test_set_mutation(self):
individual = OrderedSet([1, 3, 5, 9, 10])
representation = list(range(10))
args = {
"representation": representation,
"mutation_rate": 1
}
new_individuals = set_mutation(Random(SEED), [individual], args)
self.assertEqual(new_individuals[0], [6, 4, 1, 0])
def test_set_indel(self):
individual = OrderedSet([1, 3, 5, 9, 10])
representation = list(range(10))
args = {
"representation": representation,
"indel_rate": 1
}
new_individuals = set_indel(Random(SEED), [individual], args)
self.assertEqual(new_individuals[0], [5, 3, 9, 1])
def test_do_set_n_point_crossover(self):
representation = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N"]
int_representation = [representation.index(v) for v in representation]
mom = OrderedSet([representation.index(v) for v in ["A", "B", "E", "K", "L", "M"]])
dad = OrderedSet([representation.index(v) for v in ["A", "C", "I", "J", "K", "L"]])
points = [4]
children = _do_set_n_point_crossover(int_representation, mom, dad, points, Random(), len(mom))
bro = OrderedSet([0, 1, 8, 9, 10, 11])
sis = OrderedSet([0, 2, 4, 10, 11, 12])
self.assertEqual(children[0], bro)
self.assertEqual(children[1], sis)
def test_multiple_chromosome_set_mutation(self):
genome = MultipleChromosomeGenome(["A", "B"])
genome["A"] = [1, 2, 3, 4]
genome["B"] = [1, 5, 7, 10]
representation = list(range(10))
args = {
"A_representation": representation,
"B_representation": representation,
"A_mutation_rate": 1,
"B_mutation_rate": 1
}
new_individuals = multiple_chromosome_set_mutation(Random(SEED), [genome], args)
self.assertEqual(new_individuals[0]["A"], OrderedSet([6, 4, 1]))
self.assertEqual(new_individuals[0]["B"], OrderedSet([0, 6, 2]))
def test_multiple_chromosome_set_indel(self):
genome = MultipleChromosomeGenome(["A", "B"])
genome["A"] = [1, 2, 3, 4]
genome["B"] = [1, 5, 7, 10]
representation = list(range(10))
args = {
"A_representation": representation,
"B_representation": representation,
"A_indel_rate": 1,
"B_indel_rate": 1
}
random = Random(SEED)
new_individuals = multiple_chromosome_set_indel(random, [genome for _ in range(5)], args)
self.assertEqual(new_individuals[0]["A"], OrderedSet([2, 3, 4]))
self.assertEqual(new_individuals[0]["B"], OrderedSet([10, 1, 7]))
self.assertEqual(new_individuals[1]["A"], OrderedSet([2, 1, 4]))
self.assertEqual(new_individuals[1]["B"], OrderedSet([1, 5, 7, 10]))
self.assertEqual(new_individuals[2]["A"], OrderedSet([1, 2, 3, 4, 8]))
self.assertEqual(new_individuals[2]["B"], OrderedSet([5, 1, 10]))
self.assertEqual(new_individuals[3]["A"], OrderedSet([1, 2, 3, 4]))
self.assertEqual(new_individuals[3]["B"], OrderedSet([1, 5, 7, 10]))
self.assertEqual(new_individuals[4]["A"], OrderedSet([1, 4, 3]))
self.assertEqual(new_individuals[4]["B"], OrderedSet([5, 1, 7]))
class GenomesTestCase(unittest.TestCase):
def test_two_chromosomes(self):
genome = MultipleChromosomeGenome(["A", "B"])
self.assertIsInstance(genome["A"], OrderedSet)
self.assertIsInstance(genome["B"], OrderedSet)
genome["A"] = [1, 2, 3, 4]
genome["B"] = ["A", "B", "C"]
self.assertEqual(genome["A"], OrderedSet([1, 2, 3, 4]))
self.assertEqual(genome["B"], OrderedSet(["A", "B", "C"]))
del genome["A"]
self.assertRaises(KeyError, genome.__getitem__, "A")
| |
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple
from unittest import mock
from django.conf import settings
from django.utils.timezone import now as timezone_now
from zerver.lib.actions import (
do_add_submessage,
do_create_realm,
do_delete_messages,
do_set_realm_property,
internal_send_private_message,
)
from zerver.lib.retention import (
archive_messages,
clean_archived_data,
get_realms_and_streams_for_archiving,
move_messages_to_archive,
restore_all_data_from_archive,
restore_retention_policy_deletions_for_stream,
)
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import queries_captured, zulip_reaction_info
from zerver.lib.upload import create_attachment
from zerver.models import (
ArchivedAttachment,
ArchivedMessage,
ArchivedReaction,
ArchivedSubMessage,
ArchivedUserMessage,
ArchiveTransaction,
Attachment,
Message,
Reaction,
Realm,
Stream,
SubMessage,
UserMessage,
get_realm,
get_stream,
get_system_bot,
)
from zerver.tornado.django_api import send_event
ZULIP_REALM_DAYS = 30
MIT_REALM_DAYS = 100
class RetentionTestingBase(ZulipTestCase):
def _get_usermessage_ids(self, message_ids: List[int]) -> List[int]:
return list(
UserMessage.objects.filter(message_id__in=message_ids).values_list("id", flat=True)
)
def _verify_archive_data(
self, expected_message_ids: List[int], expected_usermessage_ids: List[int]
) -> None:
self.assertEqual(
set(ArchivedMessage.objects.values_list("id", flat=True)),
set(expected_message_ids),
)
self.assertEqual(
set(ArchivedUserMessage.objects.values_list("id", flat=True)),
set(expected_usermessage_ids),
)
# Archived Messages and UserMessages should have been removed from the normal tables:
self.assertEqual(Message.objects.filter(id__in=expected_message_ids).count(), 0)
self.assertEqual(UserMessage.objects.filter(id__in=expected_usermessage_ids).count(), 0)
def _verify_restored_data(
self, expected_message_ids: List[int], expected_usermessage_ids: List[int]
) -> None:
# Check that the data was restored:
self.assertEqual(
set(Message.objects.filter(id__in=expected_message_ids).values_list("id", flat=True)),
set(expected_message_ids),
)
self.assertEqual(
set(
UserMessage.objects.filter(id__in=expected_usermessage_ids).values_list(
"id", flat=True
)
),
set(expected_usermessage_ids),
)
# The Messages and UserMessages should still be in the archive - we don't delete them.
self.assertEqual(
set(ArchivedMessage.objects.values_list("id", flat=True)),
set(expected_message_ids),
)
self.assertEqual(
set(ArchivedUserMessage.objects.values_list("id", flat=True)),
set(expected_usermessage_ids),
)
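    # Together, these helpers pin down the archiving contract exercised in the
    # tests below: archiving moves Message/UserMessage rows into the Archived*
    # tables, and restoring copies them back without deleting the archived
    # copies.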
class ArchiveMessagesTestingBase(RetentionTestingBase):
def setUp(self) -> None:
super().setUp()
self.zulip_realm = get_realm("zulip")
self.mit_realm = get_realm("zephyr")
self._set_realm_message_retention_value(self.zulip_realm, ZULIP_REALM_DAYS)
self._set_realm_message_retention_value(self.mit_realm, MIT_REALM_DAYS)
# Set publication date of all existing messages to "now", so that we have full
# control over what's expired and what isn't.
Message.objects.all().update(date_sent=timezone_now())
def _set_realm_message_retention_value(self, realm: Realm, retention_period: int) -> None:
realm.message_retention_days = retention_period
realm.save()
def _set_stream_message_retention_value(
self, stream: Stream, retention_period: Optional[int]
) -> None:
stream.message_retention_days = retention_period
stream.save()
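    # Retention semantics relied on throughout these tests: -1 disables
    # archiving (retain forever), None on a stream means "inherit the realm
    # policy", and a positive integer is the retention period in days.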
def _change_messages_date_sent(self, msgs_ids: List[int], date_sent: datetime) -> None:
Message.objects.filter(id__in=msgs_ids).update(date_sent=date_sent)
    def _make_mit_messages(self, message_quantity: int, date_sent: datetime) -> List[int]:
        # Send messages from the mit.edu realm and backdate them to date_sent.
sender = self.mit_user("espuser")
recipient = self.mit_user("starnine")
msg_ids = [self.send_personal_message(sender, recipient) for i in range(message_quantity)]
self._change_messages_date_sent(msg_ids, date_sent)
return msg_ids
def _send_cross_realm_personal_message(self) -> int:
# Send message from bot to users from different realm.
bot_email = "notification-bot@zulip.com"
internal_realm = get_realm(settings.SYSTEM_BOT_REALM)
zulip_user = self.example_user("hamlet")
msg_id = internal_send_private_message(
sender=get_system_bot(bot_email, internal_realm.id),
recipient_user=zulip_user,
content="test message",
)
assert msg_id is not None
return msg_id
def _make_expired_zulip_messages(self, message_quantity: int) -> List[int]:
msg_ids = list(
Message.objects.order_by("id")
.filter(sender__realm=self.zulip_realm)
.values_list("id", flat=True)[3 : 3 + message_quantity]
)
self._change_messages_date_sent(
msg_ids,
timezone_now() - timedelta(ZULIP_REALM_DAYS + 1),
)
return msg_ids
def _send_messages_with_attachments(self) -> Dict[str, int]:
user_profile = self.example_user("hamlet")
sample_size = 10
host = user_profile.realm.host
realm_id = get_realm("zulip").id
dummy_files = [
("zulip.txt", f"{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt", sample_size),
("temp_file.py", f"{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py", sample_size),
("abc.py", f"{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py", sample_size),
]
for file_name, path_id, size in dummy_files:
create_attachment(file_name, path_id, user_profile, size)
self.subscribe(user_profile, "Denmark")
body = (
"Some files here ... [zulip.txt](http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)"
+ " http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py.... Some more...."
+ " http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py"
).format(id=realm_id, host=host)
expired_message_id = self.send_stream_message(user_profile, "Denmark", body)
actual_message_id = self.send_stream_message(user_profile, "Denmark", body)
othello = self.example_user("othello")
other_message_id = self.send_stream_message(othello, "Denmark", body)
self._change_messages_date_sent(
[expired_message_id], timezone_now() - timedelta(days=MIT_REALM_DAYS + 1)
)
return {
"expired_message_id": expired_message_id,
"actual_message_id": actual_message_id,
"other_user_message_id": other_message_id,
}
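    # In summary, the helper above registers three attachments, sends three
    # stream messages that all reference them (two from hamlet, one from
    # othello), and backdates only the first message past the retention
    # window, so exactly one of the three is expired at the start of a test.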
class TestArchiveMessagesGeneral(ArchiveMessagesTestingBase):
def test_no_expired_messages(self) -> None:
archive_messages()
self.assertEqual(ArchivedUserMessage.objects.count(), 0)
self.assertEqual(ArchivedMessage.objects.count(), 0)
def test_expired_messages_in_each_realm(self) -> None:
"""General test for archiving expired messages properly with
multiple realms involved"""
# Make some expired messages in MIT:
expired_mit_msg_ids = self._make_mit_messages(
5,
timezone_now() - timedelta(days=MIT_REALM_DAYS + 1),
)
# Make some non-expired messages in MIT:
self._make_mit_messages(4, timezone_now() - timedelta(days=MIT_REALM_DAYS - 1))
# Change some Zulip messages to be expired:
expired_zulip_msg_ids = list(
Message.objects.order_by("id")
.filter(sender__realm=self.zulip_realm)
.values_list("id", flat=True)[3:10]
)
self._change_messages_date_sent(
expired_zulip_msg_ids,
timezone_now() - timedelta(ZULIP_REALM_DAYS + 1),
)
expired_msg_ids = expired_mit_msg_ids + expired_zulip_msg_ids
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
archive_messages()
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(expired_msg_ids, expired_usermsg_ids)
def test_expired_messages_in_one_realm(self) -> None:
"""Test with a retention policy set for only the MIT realm"""
self._set_realm_message_retention_value(self.zulip_realm, -1)
# Make some expired messages in MIT:
expired_mit_msg_ids = self._make_mit_messages(
5,
timezone_now() - timedelta(days=MIT_REALM_DAYS + 1),
)
# Make some non-expired messages in MIT:
self._make_mit_messages(4, timezone_now() - timedelta(days=MIT_REALM_DAYS - 1))
# Change some Zulip messages date_sent, but the realm has no retention policy,
# so they shouldn't get archived
zulip_msg_ids = list(
Message.objects.order_by("id")
.filter(sender__realm=self.zulip_realm)
.values_list("id", flat=True)[3:10]
)
self._change_messages_date_sent(
zulip_msg_ids,
timezone_now() - timedelta(ZULIP_REALM_DAYS + 1),
)
# Only MIT has a retention policy:
expired_msg_ids = expired_mit_msg_ids
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
archive_messages()
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(expired_msg_ids, expired_usermsg_ids)
self._set_realm_message_retention_value(self.zulip_realm, ZULIP_REALM_DAYS)
def test_different_stream_realm_policies(self) -> None:
verona = get_stream("Verona", self.zulip_realm)
hamlet = self.example_user("hamlet")
msg_id = self.send_stream_message(hamlet, "Verona", "test")
usermsg_ids = self._get_usermessage_ids([msg_id])
self._change_messages_date_sent([msg_id], timezone_now() - timedelta(days=2))
# Don't archive if stream's retention policy set to -1:
self._set_realm_message_retention_value(self.zulip_realm, 1)
self._set_stream_message_retention_value(verona, -1)
archive_messages()
self._verify_archive_data([], [])
# Don't archive if stream and realm have no retention policy:
self._set_realm_message_retention_value(self.zulip_realm, -1)
self._set_stream_message_retention_value(verona, None)
archive_messages()
self._verify_archive_data([], [])
# Archive if stream has a retention policy set:
self._set_realm_message_retention_value(self.zulip_realm, -1)
self._set_stream_message_retention_value(verona, 1)
archive_messages()
self._verify_archive_data([msg_id], usermsg_ids)
def test_cross_realm_personal_message_archiving(self) -> None:
"""Check that cross-realm personal messages get correctly archived."""
msg_ids = [self._send_cross_realm_personal_message() for i in range(1, 7)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
# Make the message expired on the recipient's realm:
self._change_messages_date_sent(msg_ids, timezone_now() - timedelta(ZULIP_REALM_DAYS + 1))
archive_messages()
self._verify_archive_data(msg_ids, usermsg_ids)
def test_archiving_interrupted(self) -> None:
"""Check that queries get rolled back to a consistent state
if archiving gets interrupted in the middle of processing a chunk."""
expired_msg_ids = self._make_expired_zulip_messages(7)
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
# Insert an exception near the end of the archiving process of a chunk:
with mock.patch("zerver.lib.retention.delete_messages", side_effect=Exception):
with self.assertRaises(Exception):
# Specify large chunk_size to ensure things happen in a single batch
archive_messages(chunk_size=1000)
# Archiving code has been executed, but because we got an exception, things should have been rolled back:
self._verify_archive_data([], [])
self.assertEqual(
set(Message.objects.filter(id__in=expired_msg_ids).values_list("id", flat=True)),
set(expired_msg_ids),
)
self.assertEqual(
set(
UserMessage.objects.filter(id__in=expired_usermsg_ids).values_list(
"id", flat=True
)
),
set(expired_usermsg_ids),
)
def test_archive_message_tool(self) -> None:
"""End-to-end test of the archiving tool, directly calling
archive_messages."""
# Make some expired messages in MIT:
expired_mit_msg_ids = self._make_mit_messages(
5,
timezone_now() - timedelta(days=MIT_REALM_DAYS + 1),
)
# Make some non-expired messages in MIT:
self._make_mit_messages(4, timezone_now() - timedelta(days=MIT_REALM_DAYS - 1))
# Change some Zulip messages to be expired:
expired_zulip_msg_ids = self._make_expired_zulip_messages(7)
expired_crossrealm_msg_id = self._send_cross_realm_personal_message()
# Make the message expired in the recipient's realm:
self._change_messages_date_sent(
[expired_crossrealm_msg_id],
timezone_now() - timedelta(ZULIP_REALM_DAYS + 1),
)
expired_msg_ids = [*expired_mit_msg_ids, *expired_zulip_msg_ids, expired_crossrealm_msg_id]
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
archive_messages(chunk_size=2) # Specify low chunk_size to test batching.
# Make sure we archived what needed:
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(expired_msg_ids, expired_usermsg_ids)
def test_archiving_attachments(self) -> None:
"""End-to-end test for the logic for archiving attachments. This test
is hard to read without first reading _send_messages_with_attachments"""
msgs_ids = self._send_messages_with_attachments()
# First, confirm deleting the oldest message
# (`expired_message_id`) creates ArchivedAttachment objects
# and associates that message ID with them, but does not
# delete the Attachment object.
archive_messages()
self.assertEqual(ArchivedAttachment.objects.count(), 3)
self.assertEqual(
list(
ArchivedAttachment.objects.distinct("messages__id").values_list(
"messages__id", flat=True
)
),
[msgs_ids["expired_message_id"]],
)
self.assertEqual(Attachment.objects.count(), 3)
# Now make `actual_message_id` expired too. We still don't
# delete the Attachment objects.
self._change_messages_date_sent(
[msgs_ids["actual_message_id"]], timezone_now() - timedelta(days=MIT_REALM_DAYS + 1)
)
archive_messages()
self.assertEqual(Attachment.objects.count(), 3)
# Finally, make the last message mentioning those attachments
# expired. We should now delete the Attachment objects and
# each ArchivedAttachment object should list all 3 messages.
self._change_messages_date_sent(
[msgs_ids["other_user_message_id"]], timezone_now() - timedelta(days=MIT_REALM_DAYS + 1)
)
archive_messages()
self.assertEqual(Attachment.objects.count(), 0)
self.assertEqual(ArchivedAttachment.objects.count(), 3)
self.assertEqual(
list(
ArchivedAttachment.objects.distinct("messages__id")
.order_by("messages__id")
.values_list("messages__id", flat=True)
),
sorted(msgs_ids.values()),
)
restore_all_data_from_archive()
# Attachments should have been restored:
self.assertEqual(Attachment.objects.count(), 3)
# Archived data doesn't get deleted by restoring.
self.assertEqual(ArchivedAttachment.objects.count(), 3)
self.assertEqual(
list(
Attachment.objects.distinct("messages__id")
.order_by("messages__id")
.values_list("messages__id", flat=True)
),
sorted(msgs_ids.values()),
)
def test_restoring_and_rearchiving(self) -> None:
expired_msg_ids = self._make_mit_messages(
7,
timezone_now() - timedelta(days=MIT_REALM_DAYS + 1),
)
expired_usermsg_ids = self._get_usermessage_ids(expired_msg_ids)
archive_messages(chunk_size=4)
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
transactions = ArchiveTransaction.objects.all()
self.assert_length(transactions, 2) # With chunk_size 4, there should be 2 transactions
restore_all_data_from_archive()
transactions[0].refresh_from_db()
transactions[1].refresh_from_db()
self.assertTrue(transactions[0].restored)
self.assertTrue(transactions[1].restored)
archive_messages(chunk_size=10)
self._verify_archive_data(expired_msg_ids, expired_usermsg_ids)
transactions = ArchiveTransaction.objects.order_by("id")
self.assert_length(transactions, 3)
archived_messages = ArchivedMessage.objects.filter(id__in=expired_msg_ids)
# Check that the re-archived messages are correctly assigned to the new transaction:
for message in archived_messages:
self.assertEqual(message.archive_transaction_id, transactions[2].id)
class TestArchivingSubMessages(ArchiveMessagesTestingBase):
def test_archiving_submessages(self) -> None:
expired_msg_ids = self._make_expired_zulip_messages(2)
cordelia = self.example_user("cordelia")
hamlet = self.example_user("hamlet")
do_add_submessage(
realm=self.zulip_realm,
sender_id=cordelia.id,
message_id=expired_msg_ids[0],
msg_type="whatever",
content='{"name": "alice", "salary": 20}',
)
do_add_submessage(
realm=self.zulip_realm,
sender_id=hamlet.id,
message_id=expired_msg_ids[0],
msg_type="whatever",
content='{"name": "john", "salary": 30}',
)
do_add_submessage(
realm=self.zulip_realm,
sender_id=cordelia.id,
message_id=expired_msg_ids[1],
msg_type="whatever",
content='{"name": "jack", "salary": 10}',
)
submessage_ids = list(
SubMessage.objects.filter(message_id__in=expired_msg_ids).values_list("id", flat=True),
)
self.assert_length(submessage_ids, 3)
self.assertEqual(SubMessage.objects.filter(id__in=submessage_ids).count(), 3)
archive_messages()
self.assertEqual(SubMessage.objects.filter(id__in=submessage_ids).count(), 0)
self.assertEqual(
set(
ArchivedSubMessage.objects.filter(id__in=submessage_ids).values_list(
"id", flat=True
)
),
set(submessage_ids),
)
restore_all_data_from_archive()
self.assertEqual(
set(SubMessage.objects.filter(id__in=submessage_ids).values_list("id", flat=True)),
set(submessage_ids),
)
class TestArchivingReactions(ArchiveMessagesTestingBase):
def test_archiving_reactions(self) -> None:
expired_msg_ids = self._make_expired_zulip_messages(2)
hamlet = self.example_user("hamlet")
cordelia = self.example_user("cordelia")
for sender in [hamlet, cordelia]:
self.api_post(
sender,
f"/api/v1/messages/{expired_msg_ids[0]}/reactions",
zulip_reaction_info(),
)
self.api_post(
hamlet,
f"/api/v1/messages/{expired_msg_ids[1]}/reactions",
zulip_reaction_info(),
)
reaction_ids = list(
Reaction.objects.filter(message_id__in=expired_msg_ids).values_list("id", flat=True),
)
self.assert_length(reaction_ids, 3)
self.assertEqual(Reaction.objects.filter(id__in=reaction_ids).count(), 3)
archive_messages()
self.assertEqual(Reaction.objects.filter(id__in=reaction_ids).count(), 0)
self.assertEqual(
set(ArchivedReaction.objects.filter(id__in=reaction_ids).values_list("id", flat=True)),
set(reaction_ids),
)
restore_all_data_from_archive()
self.assertEqual(
set(Reaction.objects.filter(id__in=reaction_ids).values_list("id", flat=True)),
set(reaction_ids),
)
class MoveMessageToArchiveBase(RetentionTestingBase):
def setUp(self) -> None:
super().setUp()
self.sender = self.example_user("hamlet")
self.recipient = self.example_user("cordelia")
def _create_attachments(self) -> None:
sample_size = 10
realm_id = get_realm("zulip").id
dummy_files = [
("zulip.txt", f"{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt", sample_size),
("temp_file.py", f"{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py", sample_size),
("abc.py", f"{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py", sample_size),
("hello.txt", f"{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/hello.txt", sample_size),
("new.py", f"{realm_id}/31/4CBjtTLYZhk66pZrF8hnYGwc/new.py", sample_size),
]
user_profile = self.example_user("hamlet")
for file_name, path_id, size in dummy_files:
create_attachment(file_name, path_id, user_profile, size)
def _assert_archive_empty(self) -> None:
self.assertFalse(ArchivedUserMessage.objects.exists())
self.assertFalse(ArchivedMessage.objects.exists())
self.assertFalse(ArchivedAttachment.objects.exists())
class MoveMessageToArchiveGeneral(MoveMessageToArchiveBase):
def test_personal_messages_archiving(self) -> None:
msg_ids = [self.send_personal_message(self.sender, self.recipient) for i in range(0, 3)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(msg_ids, usermsg_ids)
def test_move_messages_to_archive_with_realm_argument(self) -> None:
realm = get_realm("zulip")
msg_ids = [self.send_personal_message(self.sender, self.recipient) for i in range(0, 3)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids, realm=realm)
self._verify_archive_data(msg_ids, usermsg_ids)
archive_transaction = ArchiveTransaction.objects.last()
assert archive_transaction is not None
self.assertEqual(archive_transaction.realm, realm)
def test_stream_messages_archiving(self) -> None:
msg_ids = [self.send_stream_message(self.sender, "Verona") for i in range(0, 3)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(msg_ids, usermsg_ids)
def test_archiving_messages_second_time(self) -> None:
msg_ids = [self.send_stream_message(self.sender, "Verona") for i in range(0, 3)]
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
with self.assertRaises(Message.DoesNotExist):
move_messages_to_archive(message_ids=msg_ids)
def test_archiving_messages_multiple_realms(self) -> None:
"""
Verifies that move_messages_to_archive works correctly
if called on messages in multiple realms.
"""
iago = self.example_user("iago")
othello = self.example_user("othello")
cordelia = self.lear_user("cordelia")
king = self.lear_user("king")
zulip_msg_ids = [self.send_personal_message(iago, othello) for i in range(0, 3)]
        lear_msg_ids = [self.send_personal_message(cordelia, king) for i in range(0, 3)]
        msg_ids = zulip_msg_ids + lear_msg_ids
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
restore_all_data_from_archive()
self._verify_restored_data(msg_ids, usermsg_ids)
def test_archiving_messages_with_attachment(self) -> None:
self._create_attachments()
realm_id = get_realm("zulip").id
host = get_realm("zulip").host
body1 = """Some files here ...[zulip.txt](
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py ....
Some more.... http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py
""".format(
id=realm_id, host=host
)
body2 = """Some files here
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt ...
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/hello.txt ....
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/new.py ....
""".format(
id=realm_id, host=host
)
msg_ids = [
self.send_personal_message(self.sender, self.recipient, body1),
self.send_personal_message(self.sender, self.recipient, body2),
]
attachment_id_to_message_ids: Dict[int, List[int]] = {}
attachment_ids = list(
Attachment.objects.filter(messages__id__in=msg_ids).values_list("id", flat=True),
)
for attachment_id in attachment_ids:
attachment_id_to_message_ids[attachment_id] = list(
Message.objects.filter(attachment__id=attachment_id).values_list("id", flat=True),
)
usermsg_ids = self._get_usermessage_ids(msg_ids)
self._assert_archive_empty()
move_messages_to_archive(message_ids=msg_ids)
self._verify_archive_data(msg_ids, usermsg_ids)
self.assertFalse(Attachment.objects.exists())
archived_attachment_ids = list(
ArchivedAttachment.objects.filter(messages__id__in=msg_ids).values_list(
"id", flat=True
),
)
self.assertEqual(set(attachment_ids), set(archived_attachment_ids))
for attachment_id in archived_attachment_ids:
self.assertEqual(
set(attachment_id_to_message_ids[attachment_id]),
set(
ArchivedMessage.objects.filter(
archivedattachment__id=attachment_id
).values_list("id", flat=True)
),
)
restore_all_data_from_archive()
self._verify_restored_data(msg_ids, usermsg_ids)
restored_attachment_ids = list(
Attachment.objects.filter(messages__id__in=msg_ids).values_list("id", flat=True),
)
self.assertEqual(set(attachment_ids), set(restored_attachment_ids))
for attachment_id in restored_attachment_ids:
self.assertEqual(
set(attachment_id_to_message_ids[attachment_id]),
set(
Message.objects.filter(attachment__id=attachment_id).values_list(
"id", flat=True
)
),
)
def test_archiving_message_with_shared_attachment(self) -> None:
# Make sure that attachments still in use in other messages don't get deleted:
self._create_attachments()
realm_id = get_realm("zulip").id
host = get_realm("zulip").host
body = """Some files here ...[zulip.txt](
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/zulip.txt)
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/temp_file.py ....
Some more.... http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/abc.py ...
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/new.py ....
http://{host}/user_uploads/{id}/31/4CBjtTLYZhk66pZrF8hnYGwc/hello.txt ....
""".format(
id=realm_id, host=host
)
msg_id = self.send_personal_message(self.sender, self.recipient, body)
# Simulate a reply with the same contents.
reply_msg_id = self.send_personal_message(
from_user=self.recipient,
to_user=self.sender,
content=body,
)
usermsg_ids = self._get_usermessage_ids([msg_id])
attachment_ids = list(
Attachment.objects.filter(messages__id=msg_id).values_list("id", flat=True),
)
self._assert_archive_empty()
# Archive one of the messages:
move_messages_to_archive(message_ids=[msg_id])
self._verify_archive_data([msg_id], usermsg_ids)
# Attachments shouldn't have been deleted, as the second message links to them:
self.assertEqual(Attachment.objects.count(), 5)
self.assertEqual(
set(
ArchivedAttachment.objects.filter(messages__id=msg_id).values_list("id", flat=True)
),
set(attachment_ids),
)
# Restore the first message:
restore_all_data_from_archive()
# Archive the second:
move_messages_to_archive(message_ids=[reply_msg_id])
        # The restored message still links to the Attachments, so they shouldn't be deleted:
self.assertEqual(Attachment.objects.count(), 5)
# Archive the first message again:
move_messages_to_archive(message_ids=[msg_id])
# Now the attachment should have been deleted:
self.assertEqual(Attachment.objects.count(), 0)
# Restore everything:
restore_all_data_from_archive()
self.assertEqual(
set(Attachment.objects.filter(messages__id=msg_id).values_list("id", flat=True)),
set(attachment_ids),
)
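    # Taken together: an Attachment row is deleted only once every message
    # referencing it has been archived, restoring any referencing message
    # brings the Attachment back, and ArchivedAttachment rows persist either
    # way.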
class MoveMessageToArchiveWithSubMessages(MoveMessageToArchiveBase):
def test_archiving_message_with_submessages(self) -> None:
msg_id = self.send_stream_message(self.sender, "Verona")
cordelia = self.example_user("cordelia")
hamlet = self.example_user("hamlet")
do_add_submessage(
realm=get_realm("zulip"),
sender_id=cordelia.id,
message_id=msg_id,
msg_type="whatever",
content='{"name": "alice", "salary": 20}',
)
do_add_submessage(
realm=get_realm("zulip"),
sender_id=hamlet.id,
message_id=msg_id,
msg_type="whatever",
content='{"name": "john", "salary": 30}',
)
submessage_ids = list(
SubMessage.objects.filter(message_id=msg_id).values_list("id", flat=True),
)
self.assertEqual(SubMessage.objects.filter(id__in=submessage_ids).count(), 2)
move_messages_to_archive(message_ids=[msg_id])
self.assertEqual(
set(ArchivedSubMessage.objects.filter(message_id=msg_id).values_list("id", flat=True)),
set(submessage_ids),
)
self.assertEqual(SubMessage.objects.filter(id__in=submessage_ids).count(), 0)
restore_all_data_from_archive()
self.assertEqual(
set(SubMessage.objects.filter(id__in=submessage_ids).values_list("id", flat=True)),
set(submessage_ids),
)
class MoveMessageToArchiveWithReactions(MoveMessageToArchiveBase):
def test_archiving_message_with_reactions(self) -> None:
msg_id = self.send_stream_message(self.sender, "Verona")
for name in ["hamlet", "cordelia"]:
self.api_post(
self.example_user(name),
f"/api/v1/messages/{msg_id}/reactions",
zulip_reaction_info(),
)
reaction_ids = list(
Reaction.objects.filter(message_id=msg_id).values_list("id", flat=True),
)
self.assertEqual(Reaction.objects.filter(id__in=reaction_ids).count(), 2)
move_messages_to_archive(message_ids=[msg_id])
self.assertEqual(
set(ArchivedReaction.objects.filter(message_id=msg_id).values_list("id", flat=True)),
set(reaction_ids),
)
self.assertEqual(Reaction.objects.filter(id__in=reaction_ids).count(), 0)
restore_all_data_from_archive()
self.assertEqual(
set(Reaction.objects.filter(id__in=reaction_ids).values_list("id", flat=True)),
set(reaction_ids),
)
class TestCleaningArchive(ArchiveMessagesTestingBase):
def test_clean_archived_data(self) -> None:
self._make_expired_zulip_messages(7)
archive_messages(chunk_size=2) # Small chunk size to have multiple transactions
transactions = list(ArchiveTransaction.objects.all())
for transaction in transactions[0:-1]:
transaction.timestamp = timezone_now() - timedelta(
days=settings.ARCHIVED_DATA_VACUUMING_DELAY_DAYS + 1
)
transaction.save()
message_ids_to_clean = list(
ArchivedMessage.objects.filter(archive_transaction__in=transactions[0:-1]).values_list(
"id", flat=True
)
)
clean_archived_data()
remaining_transactions = list(ArchiveTransaction.objects.all())
self.assert_length(remaining_transactions, 1)
# All transactions except the last one were deleted:
self.assertEqual(remaining_transactions[0].id, transactions[-1].id)
# And corresponding ArchivedMessages should have been deleted:
self.assertFalse(ArchivedMessage.objects.filter(id__in=message_ids_to_clean).exists())
self.assertFalse(
ArchivedUserMessage.objects.filter(message_id__in=message_ids_to_clean).exists()
)
for message in ArchivedMessage.objects.all():
self.assertEqual(message.archive_transaction_id, remaining_transactions[0].id)
class TestGetRealmAndStreamsForArchiving(ZulipTestCase):
def fix_ordering_of_result(self, result: List[Tuple[Realm, List[Stream]]]) -> None:
"""
This is a helper for giving the structure returned by get_realms_and_streams_for_archiving
a consistent ordering.
"""
# Sort the list of tuples by realm id:
result.sort(key=lambda x: x[0].id)
# Now we sort the lists of streams in each tuple:
for realm, streams_list in result:
streams_list.sort(key=lambda stream: stream.id)
def simple_get_realms_and_streams_for_archiving(self) -> List[Tuple[Realm, List[Stream]]]:
"""
This is an implementation of the function we're testing, but using the obvious,
unoptimized algorithm. We can use this for additional verification of correctness,
by comparing the output of the two implementations.
"""
result = []
for realm in Realm.objects.all():
if realm.message_retention_days != -1:
streams = Stream.objects.filter(realm=realm).exclude(message_retention_days=-1)
result.append((realm, list(streams)))
else:
streams = (
Stream.objects.filter(realm=realm)
.exclude(message_retention_days__isnull=True)
.exclude(message_retention_days=-1)
)
if streams.exists():
result.append((realm, list(streams)))
return result
def test_get_realms_and_streams_for_archiving(self) -> None:
zulip_realm = get_realm("zulip")
zulip_realm.message_retention_days = 10
zulip_realm.save()
verona = get_stream("Verona", zulip_realm)
verona.message_retention_days = -1 # Block archiving for this stream
verona.save()
denmark = get_stream("Denmark", zulip_realm)
denmark.message_retention_days = 1
denmark.save()
zephyr_realm = get_realm("zephyr")
zephyr_realm.message_retention_days = -1
zephyr_realm.save()
self.make_stream("normal stream", realm=zephyr_realm)
archiving_blocked_zephyr_stream = self.make_stream("no archiving", realm=zephyr_realm)
archiving_blocked_zephyr_stream.message_retention_days = -1
archiving_blocked_zephyr_stream.save()
archiving_enabled_zephyr_stream = self.make_stream("with archiving", realm=zephyr_realm)
archiving_enabled_zephyr_stream.message_retention_days = 1
archiving_enabled_zephyr_stream.save()
no_archiving_realm = do_create_realm(string_id="no_archiving", name="no_archiving")
do_set_realm_property(no_archiving_realm, "invite_required", False, acting_user=None)
do_set_realm_property(no_archiving_realm, "message_retention_days", -1, acting_user=None)
        # Realm for testing the edge case where the realm has a default retention
        # policy, but all of its streams disable it.
realm_all_streams_archiving_disabled = do_create_realm(
string_id="with_archiving", name="with_archiving"
)
do_set_realm_property(
realm_all_streams_archiving_disabled, "invite_required", False, acting_user=None
)
do_set_realm_property(
realm_all_streams_archiving_disabled, "message_retention_days", 1, acting_user=None
)
Stream.objects.filter(realm=realm_all_streams_archiving_disabled).update(
message_retention_days=-1
)
        # We construct a list representing what the result of get_realms_and_streams_for_archiving should be.
        # One nuisance is that the ordering of the elements in the result structure is not deterministic,
        # so we use a helper to order both structures in a consistent manner. This wouldn't be necessary
        # if Python had a true "unordered list" data structure. A set doesn't do the job, because it
        # requires its elements to be hashable.
expected_result = [
(zulip_realm, list(Stream.objects.filter(realm=zulip_realm).exclude(id=verona.id))),
(zephyr_realm, [archiving_enabled_zephyr_stream]),
(realm_all_streams_archiving_disabled, []),
]
self.fix_ordering_of_result(expected_result)
simple_algorithm_result = self.simple_get_realms_and_streams_for_archiving()
self.fix_ordering_of_result(simple_algorithm_result)
result = get_realms_and_streams_for_archiving()
self.fix_ordering_of_result(result)
self.assert_length(result, len(expected_result))
self.assertEqual(result, expected_result)
self.assert_length(result, len(simple_algorithm_result))
self.assertEqual(result, simple_algorithm_result)
class TestRestoreStreamMessages(ArchiveMessagesTestingBase):
def test_restore_retention_policy_deletions_for_stream(self) -> None:
cordelia = self.example_user("cordelia")
hamlet = self.example_user("hamlet")
realm = get_realm("zulip")
stream_name = "Verona"
stream = get_stream(stream_name, realm)
message_ids_to_archive_manually = [
self.send_stream_message(cordelia, stream_name, str(i)) for i in range(0, 2)
]
usermessage_ids_to_archive_manually = self._get_usermessage_ids(
message_ids_to_archive_manually
)
message_ids_to_archive_by_policy = [
self.send_stream_message(hamlet, stream_name, str(i)) for i in range(0, 2)
]
usermessage_ids_to_archive_by_policy = self._get_usermessage_ids(
message_ids_to_archive_by_policy
)
expected_archived_message_ids = (
message_ids_to_archive_manually + message_ids_to_archive_by_policy
)
expected_archived_usermessage_ids = (
usermessage_ids_to_archive_manually + usermessage_ids_to_archive_by_policy
)
self._set_stream_message_retention_value(stream, 5)
self._change_messages_date_sent(
message_ids_to_archive_by_policy, timezone_now() - timedelta(days=6)
)
move_messages_to_archive(message_ids_to_archive_manually)
archive_messages()
self._verify_archive_data(expected_archived_message_ids, expected_archived_usermessage_ids)
restore_retention_policy_deletions_for_stream(stream)
# Verify that we restore the stream messages that were archived due to retention policy,
# but not the ones manually deleted.
self.assert_length(
Message.objects.filter(id__in=message_ids_to_archive_by_policy),
len(message_ids_to_archive_by_policy),
)
self.assertFalse(Message.objects.filter(id__in=message_ids_to_archive_manually))
class TestDoDeleteMessages(ZulipTestCase):
def test_do_delete_messages_multiple(self) -> None:
realm = get_realm("zulip")
cordelia = self.example_user("cordelia")
message_ids = [self.send_stream_message(cordelia, "Verona", str(i)) for i in range(0, 10)]
messages = Message.objects.filter(id__in=message_ids)
with queries_captured() as queries:
do_delete_messages(realm, messages)
self.assertFalse(Message.objects.filter(id__in=message_ids).exists())
self.assert_length(queries, 19)
archived_messages = ArchivedMessage.objects.filter(id__in=message_ids)
self.assertEqual(archived_messages.count(), len(message_ids))
self.assert_length({message.archive_transaction_id for message in archived_messages}, 1)
def test_old_event_format_processed_correctly(self) -> None:
"""
        do_delete_messages used to send events with users in dict format {"id": <int>}.
        We have a block in process_notification to deal with that old format, which should
        be deleted in a later release. This test is meant to ensure the correctness of that block.
"""
realm = get_realm("zulip")
cordelia = self.example_user("cordelia")
hamlet = self.example_user("hamlet")
message_id = self.send_personal_message(cordelia, hamlet)
message = Message.objects.get(id=message_id)
event = {
"type": "delete_message",
"sender": message.sender.email,
"sender_id": message.sender_id,
"message_id": message.id,
"message_type": "private",
"recipient_id": message.recipient_id,
}
move_messages_to_archive([message_id])
        # We only send the event to verify that no exception is thrown - as one would be
        # if the block in process_notification handling this old format of "users to notify"
        # weren't correct.
send_event(realm, event, [{"id": cordelia.id}, {"id": hamlet.id}])
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Ground.cad_passport'
db.alter_column(u'build_ground', 'cad_passport', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'Ground.offer'
db.alter_column(u'build_ground', 'offer', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'Ground.permission'
db.alter_column(u'build_ground', 'permission', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'Building.permission'
db.alter_column(u'build_building', 'permission', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'Building.offer'
db.alter_column(u'build_building', 'offer', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.facility_permission'
db.alter_column(u'build_contractdocuments', 'facility_permission', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.building_permissions'
db.alter_column(u'build_contractdocuments', 'building_permissions', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.mo_notice_to_citizen'
db.alter_column(u'build_contractdocuments', 'mo_notice_to_citizen', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.cost_infos'
db.alter_column(u'build_contractdocuments', 'cost_infos', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.acceptance_acts'
db.alter_column(u'build_contractdocuments', 'acceptance_acts', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.mun_act_to_fond'
db.alter_column(u'build_contractdocuments', 'mun_act_to_fond', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.hiring_contract'
db.alter_column(u'build_contractdocuments', 'hiring_contract', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.mo_certificate'
db.alter_column(u'build_contractdocuments', 'mo_certificate', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.photos'
db.alter_column(u'build_contractdocuments', 'photos', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.land_right_stating'
db.alter_column(u'build_contractdocuments', 'land_right_stating', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.transmission_acts'
db.alter_column(u'build_contractdocuments', 'transmission_acts', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.approval_citizen_statement'
db.alter_column(u'build_contractdocuments', 'approval_citizen_statement', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.mun_contracts'
db.alter_column(u'build_contractdocuments', 'mun_contracts', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.protocols'
db.alter_column(u'build_contractdocuments', 'protocols', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
# Changing field 'ContractDocuments.tec_passport_tec_plan'
db.alter_column(u'build_contractdocuments', 'tec_passport_tec_plan', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True))
def backwards(self, orm):
# Changing field 'Ground.cad_passport'
db.alter_column(u'build_ground', 'cad_passport', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'Ground.offer'
db.alter_column(u'build_ground', 'offer', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'Ground.permission'
db.alter_column(u'build_ground', 'permission', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'Building.permission'
db.alter_column(u'build_building', 'permission', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'Building.offer'
db.alter_column(u'build_building', 'offer', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.facility_permission'
db.alter_column(u'build_contractdocuments', 'facility_permission', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.building_permissions'
db.alter_column(u'build_contractdocuments', 'building_permissions', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.mo_notice_to_citizen'
db.alter_column(u'build_contractdocuments', 'mo_notice_to_citizen', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.cost_infos'
db.alter_column(u'build_contractdocuments', 'cost_infos', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.acceptance_acts'
db.alter_column(u'build_contractdocuments', 'acceptance_acts', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.mun_act_to_fond'
db.alter_column(u'build_contractdocuments', 'mun_act_to_fond', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.hiring_contract'
db.alter_column(u'build_contractdocuments', 'hiring_contract', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.mo_certificate'
db.alter_column(u'build_contractdocuments', 'mo_certificate', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.photos'
db.alter_column(u'build_contractdocuments', 'photos', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.land_right_stating'
db.alter_column(u'build_contractdocuments', 'land_right_stating', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.transmission_acts'
db.alter_column(u'build_contractdocuments', 'transmission_acts', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.approval_citizen_statement'
db.alter_column(u'build_contractdocuments', 'approval_citizen_statement', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.mun_contracts'
db.alter_column(u'build_contractdocuments', 'mun_contracts', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.protocols'
db.alter_column(u'build_contractdocuments', 'protocols', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
# Changing field 'ContractDocuments.tec_passport_tec_plan'
db.alter_column(u'build_contractdocuments', 'tec_passport_tec_plan', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True))
models = {
'build.building': {
'Meta': {'object_name': 'Building'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'area': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'flat_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'offer': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.contract': {
'Meta': {'object_name': 'Contract'},
'area': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'blank': 'True'}),
'docs': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.ContractDocuments']", 'null': 'True', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'has_trouble_docs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'period_of_payment': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'sign_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'summa': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.contractdocuments': {
'Meta': {'object_name': 'ContractDocuments'},
'acceptance_acts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'approval_citizen_statement': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'building_permissions': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cost_infos': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'facility_permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'hiring_contract': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'land_right_stating': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mo_certificate': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mo_notice_to_citizen': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mun_act_to_fond': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'mun_contracts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'photos': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'protocols': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tec_passport_tec_plan': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'transmission_acts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'build.ground': {
'Meta': {'object_name': 'Ground'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'area': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'offer': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.basehallway': {
'Meta': {'object_name': 'BaseHallway'},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.basekitchen': {
'Meta': {'object_name': 'BaseKitchen'},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sink_with_mixer': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.baseroom': {
'Meta': {'object_name': 'BaseRoom'},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.basewc': {
'Meta': {'object_name': 'BaseWC'},
'bath_with_mixer': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_toilet': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_tower_dryer': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sink_with_mixer': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.developer': {
'Meta': {'object_name': 'Developer'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'boss_position': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'face_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'})
},
u'core.hallway': {
'Meta': {'object_name': 'Hallway', '_ormbases': ['core.BaseHallway']},
u'basehallway_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseHallway']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.kitchen': {
'Meta': {'object_name': 'Kitchen', '_ormbases': ['core.BaseKitchen']},
u'basekitchen_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseKitchen']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'stove': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.room': {
'Meta': {'object_name': 'Room', '_ormbases': ['core.BaseRoom']},
u'baseroom_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseRoom']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.wc': {
'Meta': {'object_name': 'WC', '_ormbases': ['core.BaseWC']},
u'basewc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseWC']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'separate': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'mo.mo': {
'Meta': {'object_name': 'MO'},
'common_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_economy': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_percentage': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_spent': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'has_trouble': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['build']
# -----------------------------------------------------------------------------
# ply: lex.py
#
# Author: David M. Beazley (dave@dabeaz.com)
# Modification for pyglet by Alex Holkner (alex.holkner@gmail.com)
# Modification for ctypesgen by Tim Maxwell (timmaxw@gmail.com) <tm>
#
# Copyright (C) 2001-2006, David M. Beazley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See the file LICENSE for a complete copy of the LGPL.
# -----------------------------------------------------------------------------
__version__ = "2.2"
import re, sys, types, os.path, importlib
# Regular expression used to match valid token names
_is_identifier = re.compile(r"^[a-zA-Z0-9_]+$")
_INSTANCETYPE = object
# Exception thrown when an invalid token is encountered and no default error
# handler is defined.
class LexError(Exception):
def __init__(self, message, s):
self.args = (message,)
self.text = s
# Token class
class LexToken(object):
def __str__(self):
return "LexToken(%s,%r,%d,%d)" % (self.type, self.value, self.lineno, self.lexpos)
def __repr__(self):
return str(self)
def skip(self, n):
self.lexer.skip(n)
# -----------------------------------------------------------------------------
# Lexer class
#
# This class encapsulates all of the methods and data associated with a lexer.
#
# input() - Store a new string in the lexer
# token() - Get the next token
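#
# A minimal driving loop (a hedged sketch; assumes 'lexer' was built by the
# lex() function defined later in this file):
#
#     lexer.input(source_text)
#     tok = lexer.token()
#     while tok:
#         print(tok.type, tok.value)   # each LexToken carries type/value
#         tok = lexer.token()          # token() returns None at end of input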
# -----------------------------------------------------------------------------
class Lexer:
def __init__(self):
self.lexre = None # Master regular expression. This is a list of
# tuples (re,findex) where re is a compiled
# regular expression and findex is a list
# mapping regex group numbers to rules
self.lexretext = None # Current regular expression strings
self.lexstatere = {} # Dictionary mapping lexer states to master regexs
self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
self.lexstate = "INITIAL" # Current lexer state
self.lexstatestack = [] # Stack of lexer states
self.lexstateinfo = None # State information
self.lexstateignore = {} # Dictionary of ignored characters for each state
self.lexstateerrorf = {} # Dictionary of error functions for each state
self.lexreflags = 0 # Optional re compile flags
self.lexdata = None # Actual input data (as a string)
self.lexpos = 0 # Current position in input text
self.lexlen = 0 # Length of the input text
self.lexerrorf = None # Error rule (if any)
self.lextokens = None # List of valid tokens
self.lexignore = "" # Ignored characters
self.lexliterals = "" # Literal characters that can be passed through
self.lexmodule = None # Module
self.lineno = 1 # Current line number
self.lexdebug = 0 # Debugging mode
self.lexoptimize = 0 # Optimized mode
def clone(self, object=None):
c = Lexer()
c.lexstatere = self.lexstatere
c.lexstateinfo = self.lexstateinfo
c.lexstateretext = self.lexstateretext
c.lexstate = self.lexstate
c.lexstatestack = self.lexstatestack
c.lexstateignore = self.lexstateignore
c.lexstateerrorf = self.lexstateerrorf
c.lexreflags = self.lexreflags
c.lexdata = self.lexdata
c.lexpos = self.lexpos
c.lexlen = self.lexlen
c.lextokens = self.lextokens
c.lexdebug = self.lexdebug
c.lineno = self.lineno
c.lexoptimize = self.lexoptimize
c.lexliterals = self.lexliterals
c.lexmodule = self.lexmodule
# If the object parameter has been supplied, it means we are attaching the
# lexer to a new object. In this case, we have to rebind all methods in
# the lexstatere and lexstateerrorf tables.
if object:
newtab = {}
for key, ritem in self.lexstatere.items():
newre = []
for cre, findex in ritem:
newfindex = []
for f in findex:
if not f or not f[0]:
newfindex.append(f)
continue
newfindex.append((getattr(object, f[0].__name__), f[1]))
newre.append((cre, newfindex))
newtab[key] = newre
c.lexstatere = newtab
c.lexstateerrorf = {}
for key, ef in self.lexstateerrorf.items():
c.lexstateerrorf[key] = getattr(object, ef.__name__)
c.lexmodule = object
# Set up other attributes
c.begin(c.lexstate)
return c
# ------------------------------------------------------------
# writetab() - Write lexer information to a table file
# ------------------------------------------------------------
# <tm> 25 June 2008 added 'outputdir'
def writetab(self, tabfile, outputdir=""):
tf = open(os.path.join(outputdir, tabfile) + ".py", "w")
tf.write(
"# %s.py. This file automatically created by PLY (version %s). Don't edit!\n"
% (tabfile, __version__)
)
tf.write("_lextokens = %s\n" % repr(self.lextokens))
tf.write("_lexreflags = %s\n" % repr(self.lexreflags))
tf.write("_lexliterals = %s\n" % repr(self.lexliterals))
tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
tabre = {}
for key, lre in self.lexstatere.items():
titem = []
for i in range(len(lre)):
titem.append((self.lexstateretext[key][i], _funcs_to_names(lre[i][1])))
tabre[key] = titem
tf.write("_lexstatere = %s\n" % repr(tabre))
tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
taberr = {}
for key, ef in self.lexstateerrorf.items():
if ef:
taberr[key] = ef.__name__
else:
taberr[key] = None
tf.write("_lexstateerrorf = %s\n" % repr(taberr))
tf.close()
# ------------------------------------------------------------
# readtab() - Read lexer information from a tab file
# ------------------------------------------------------------
def readtab(self, tabfile, fdict):
lextab = importlib.import_module(tabfile)
self.lextokens = lextab._lextokens
self.lexreflags = lextab._lexreflags
self.lexliterals = lextab._lexliterals
self.lexstateinfo = lextab._lexstateinfo
self.lexstateignore = lextab._lexstateignore
self.lexstatere = {}
self.lexstateretext = {}
for key, lre in lextab._lexstatere.items():
titem = []
txtitem = []
for i in range(len(lre)):
titem.append(
(re.compile(lre[i][0], lextab._lexreflags), _names_to_funcs(lre[i][1], fdict))
)
txtitem.append(lre[i][0])
self.lexstatere[key] = titem
self.lexstateretext[key] = txtitem
self.lexstateerrorf = {}
for key, ef in lextab._lexstateerrorf.items():
self.lexstateerrorf[key] = fdict[ef]
self.begin("INITIAL")
# ------------------------------------------------------------
# input() - Push a new string into the lexer
# ------------------------------------------------------------
def input(self, s):
if not (isinstance(s, bytes) or isinstance(s, str)):
raise ValueError("Expected a string")
self.lexdata = s
self.lexpos = 0
self.lexlen = len(s)
# ------------------------------------------------------------
# begin() - Changes the lexing state
# ------------------------------------------------------------
def begin(self, state):
if state not in self.lexstatere:
raise ValueError("Undefined state")
self.lexre = self.lexstatere[state]
self.lexretext = self.lexstateretext[state]
self.lexignore = self.lexstateignore.get(state, "")
self.lexerrorf = self.lexstateerrorf.get(state, None)
self.lexstate = state
# ------------------------------------------------------------
# push_state() - Changes the lexing state and saves old on stack
# ------------------------------------------------------------
def push_state(self, state):
self.lexstatestack.append(self.lexstate)
self.begin(state)
# ------------------------------------------------------------
# pop_state() - Restores the previous state
# ------------------------------------------------------------
def pop_state(self):
self.begin(self.lexstatestack.pop())
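    # Example (a hedged sketch): rules can nest lexer states via push_state()
    # and pop_state(), assuming a hypothetical 'mlcomment' state was declared
    # in the module's 'states' list:
    #
    #     def t_COMMENT_START(t):
    #         r"/\*"
    #         t.lexer.push_state("mlcomment")
    #
    #     def t_mlcomment_END(t):
    #         r"\*/"
    #         t.lexer.pop_state()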
# ------------------------------------------------------------
# current_state() - Returns the current lexing state
# ------------------------------------------------------------
def current_state(self):
return self.lexstate
# ------------------------------------------------------------
# skip() - Skip ahead n characters
# ------------------------------------------------------------
def skip(self, n):
self.lexpos += n
# ------------------------------------------------------------
# token() - Return the next token from the Lexer
#
# Note: This function has been carefully implemented to be as fast
# as possible. Don't make changes unless you really know what
# you are doing
# ------------------------------------------------------------
def token(self):
# Make local copies of frequently referenced attributes
lexpos = self.lexpos
lexlen = self.lexlen
lexignore = self.lexignore
lexdata = self.lexdata
while lexpos < lexlen:
            # Short-circuit for whitespace, tabs, and other ignored characters
if lexdata[lexpos] in lexignore:
lexpos += 1
continue
# Look for a regular expression match
for lexre, lexindexfunc in self.lexre:
m = lexre.match(lexdata, lexpos)
if not m:
continue
# Set last match in lexer so that rules can access it if they want
self.lexmatch = m
# Create a token for return
tok = LexToken()
tok.value = m.group()
tok.groups = m.groups()
tok.lineno = self.lineno
tok.lexpos = lexpos
tok.lexer = self
lexpos = m.end()
i = m.lastindex
func, tok.type = lexindexfunc[i]
self.lexpos = lexpos
if not func:
# If no token type was set, it's an ignored token
if tok.type:
return tok
break
                # If func is not callable, it's an ignored token
if not hasattr(func, "__call__"):
break
# If token is processed by a function, call it
newtok = func(tok)
                # Every function must return a token; if it returns nothing, we
                # just move on to the next token
if not newtok:
lexpos = self.lexpos # This is here in case user has updated lexpos.
# Added for pyglet/tools/wrapper/cparser.py by Alex
# Holkner on 20/Jan/2007
lexdata = self.lexdata
break
# Verify type of the token. If not in the token map, raise an error
if not self.lexoptimize:
# Allow any single-character literal also for
# pyglet/tools/wrapper/cparser.py by Alex Holkner on
# 20/Jan/2007
if newtok.type not in self.lextokens and len(newtok.type) > 1:
raise LexError(
"%s:%d: Rule '%s' returned an unknown token type '%s'"
% (
func.__code__.co_filename,
func.__code__.co_firstlineno,
func.__name__,
newtok.type,
),
lexdata[lexpos:],
)
return newtok
else:
# No match, see if in literals
if lexdata[lexpos] in self.lexliterals:
tok = LexToken()
tok.value = lexdata[lexpos]
tok.lineno = self.lineno
tok.lexer = self
tok.type = tok.value
tok.lexpos = lexpos
self.lexpos = lexpos + 1
return tok
# No match. Call t_error() if defined.
if self.lexerrorf:
tok = LexToken()
tok.value = self.lexdata[lexpos:]
tok.lineno = self.lineno
tok.type = "error"
tok.lexer = self
tok.lexpos = lexpos
self.lexpos = lexpos
newtok = self.lexerrorf(tok)
if lexpos == self.lexpos:
# Error method didn't change text position at all. This is an error.
raise LexError(
"Scanning error. Illegal character '%s'" % (lexdata[lexpos]),
lexdata[lexpos:],
)
lexpos = self.lexpos
if not newtok:
continue
return newtok
self.lexpos = lexpos
raise LexError(
"Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos),
lexdata[lexpos:],
)
self.lexpos = lexpos + 1
if self.lexdata is None:
raise RuntimeError("No input string given with input()")
return None
# -----------------------------------------------------------------------------
# _validate_file()
#
# This checks to see if there are duplicated t_rulename() functions or strings
# in the parser input file. This is done using a simple regular expression
# match against each line of the file.
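#
# For example (hypothetical input), a module containing both
#     def t_NUMBER(t): ...
# and, further down,
#     t_NUMBER = r'\d+'
# would be reported here as a redefinition of t_NUMBER.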
# -----------------------------------------------------------------------------
def _validate_file(filename):
import os.path
base, ext = os.path.splitext(filename)
if ext != ".py":
return 1 # No idea what the file is. Return OK
try:
f = open(filename)
lines = f.readlines()
f.close()
except IOError:
return 1 # Oh well
fre = re.compile(r"\s*def\s+(t_[a-zA-Z_0-9]*)\(")
sre = re.compile(r"\s*(t_[a-zA-Z_0-9]*)\s*=")
counthash = {}
linen = 1
noerror = 1
for l in lines:
m = fre.match(l)
if not m:
m = sre.match(l)
if m:
name = m.group(1)
prev = counthash.get(name)
if not prev:
counthash[name] = linen
else:
print(
"%s:%d: Rule %s redefined. Previously defined on line %d"
% (filename, linen, name, prev)
)
noerror = 0
linen += 1
return noerror
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist):
result = []
for f in funclist:
if f and f[0]:
result.append((f[0].__name__, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
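#
# For illustration (hypothetical rules), two components such as
#     (?P<t_NUMBER>\d+) and (?P<t_PLUS>\+)
# are joined with '|' into a single master expression, and lexindexfunc maps
# each named group's index back to its rule function or token name.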
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict):
if not relist:
return []
regex = "|".join(relist)
try:
lexre = re.compile(regex, re.VERBOSE | reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, handle.__name__[2:])
elif handle is not None:
# If rule was specified as a string, we build an anonymous
# callback function to carry out the action
if f.find("ignore_") > 0:
lexindexfunc[i] = (None, None)
print("IGNORE", f)
else:
lexindexfunc[i] = (None, f[2:])
return [(lexre, lexindexfunc)], [regex]
except Exception as e:
m = int(len(relist) / 2)
if m == 0:
m = 1
llist, lre = _form_master_re(relist[:m], reflags, ldict)
rlist, rre = _form_master_re(relist[m:], reflags, ldict)
return llist + rlist, lre + rre
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_<states>_<tokenname>" and a
# dictionary whose keys are state names, this function returns a tuple
# (states, tokenname) where states is a tuple of state names and tokenname
# is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
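# With no state prefix, e.g. s = "t_NUMBER", the default state applies and
# the result is (('INITIAL',), 'NUMBER').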
# -----------------------------------------------------------------------------
def _statetoken(s, names):
nonstate = 1
parts = s.split("_")
for i in range(1, len(parts)):
if parts[i] not in names and parts[i] != "ANY":
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ("INITIAL",)
if "ANY" in states:
states = tuple(names.keys())
tokenname = "_".join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
# cls added for pyglet/tools/wrapper/cparser.py by Alex Holkner on 22/Jan/2007
# <tm> 25 June 2008 added 'outputdir'
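#
# Example usage (a hedged sketch; 'calclex' is a hypothetical module that
# defines 'tokens' and t_* rules following the conventions in this file):
#
#     import calclex
#     lexer = lex(module=calclex)
#     lexer.input("3 + 4")
#     while True:
#         tok = lexer.token()
#         if not tok:
#             break
#         print(tok)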
def lex(
module=None,
object=None,
debug=0,
optimize=0,
lextab="lextab",
reflags=0,
nowarn=0,
outputdir="",
cls=Lexer,
):
global lexer
ldict = None
stateinfo = {"INITIAL": "inclusive"}
error = 0
files = {}
lexobj = cls()
lexobj.lexdebug = debug
lexobj.lexoptimize = optimize
global token, input
if nowarn:
warn = 0
else:
warn = 1
if object:
module = object
if module:
# User supplied a module object.
if isinstance(module, types.ModuleType):
ldict = module.__dict__
elif isinstance(module, _INSTANCETYPE):
_items = [(k, getattr(module, k)) for k in dir(module)]
ldict = {}
for (i, v) in _items:
ldict[i] = v
else:
raise ValueError("Expected a module or instance")
lexobj.lexmodule = module
else:
# No module given. We might be able to get information from the caller.
try:
raise RuntimeError
except RuntimeError:
e, b, t = sys.exc_info()
f = t.tb_frame
f = f.f_back # Walk out to our calling function
ldict = f.f_globals # Grab its globals dictionary
if optimize and lextab:
try:
lexobj.readtab(lextab, ldict)
token = lexobj.token
input = lexobj.input
lexer = lexobj
return lexobj
except ImportError:
pass
# Get the tokens, states, and literals variables (if any)
if module and isinstance(module, _INSTANCETYPE):
tokens = getattr(module, "tokens", None)
states = getattr(module, "states", None)
literals = getattr(module, "literals", "")
else:
tokens = ldict.get("tokens", None)
states = ldict.get("states", None)
literals = ldict.get("literals", "")
if not tokens:
raise SyntaxError("lex: module does not define 'tokens'")
if not (isinstance(tokens, list) or isinstance(tokens, tuple)):
raise SyntaxError("lex: tokens must be a list or tuple.")
# Build a dictionary of valid token names
lexobj.lextokens = {}
if not optimize:
for n in tokens:
if not _is_identifier.match(n):
print("lex: Bad token name '%s'" % n)
error = 1
if warn and n in lexobj.lextokens:
print("lex: Warning. Token '%s' multiply defined." % n)
lexobj.lextokens[n] = None
else:
for n in tokens:
lexobj.lextokens[n] = None
if debug:
print("lex: tokens = '%s'" % list(lexobj.lextokens.keys()))
try:
for c in literals:
if not (isinstance(c, bytes) or isinstance(c, str)) or len(c) > 1:
print("lex: Invalid literal %s. Must be a single character" % repr(c))
error = 1
continue
except TypeError:
print("lex: Invalid literals specification. literals must be a sequence of characters.")
error = 1
lexobj.lexliterals = literals
# Build statemap
if states:
if not (isinstance(states, tuple) or isinstance(states, list)):
print("lex: states must be defined as a tuple or list.")
error = 1
else:
for s in states:
if not isinstance(s, tuple) or len(s) != 2:
print(
"lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')"
% repr(s)
)
error = 1
continue
name, statetype = s
if not isinstance(name, str):
print("lex: state name %s must be a string" % repr(name))
error = 1
continue
if not (statetype == "inclusive" or statetype == "exclusive"):
print("lex: state type for state %s must be 'inclusive' or 'exclusive'" % name)
error = 1
continue
if name in stateinfo:
print("lex: state '%s' already defined." % name)
error = 1
continue
stateinfo[name] = statetype
# Get a list of symbols with the t_ or s_ prefix
tsymbols = [f for f in ldict.keys() if f[:2] == "t_"]
# Now build up a list of functions and a list of strings
funcsym = {} # Symbols defined as functions
strsym = {} # Symbols defined as strings
toknames = {} # Mapping of symbols to token names
for s in stateinfo.keys():
funcsym[s] = []
strsym[s] = []
ignore = {} # Ignore strings by state
errorf = {} # Error functions by state
if len(tsymbols) == 0:
raise SyntaxError("lex: no rules of the form t_rulename are defined.")
for f in tsymbols:
t = ldict[f]
states, tokname = _statetoken(f, stateinfo)
toknames[f] = tokname
if hasattr(t, "__call__"):
for s in states:
funcsym[s].append((f, t))
elif isinstance(t, bytes) or isinstance(t, str):
for s in states:
strsym[s].append((f, t))
else:
print("lex: %s not defined as a function or string" % f)
error = 1
# Sort the functions by line number
for f in funcsym.values():
f.sort(key=lambda x: x[1].__code__.co_firstlineno)
    # Sort the strings by decreasing regular expression length so that longer
    # patterns are tried before shorter ones (e.g. '==' before '=')
    for s in strsym.values():
        s.sort(key=lambda x: len(x[1]), reverse=True)
regexs = {}
# Build the master regular expressions
for state in stateinfo.keys():
regex_list = []
# Add rules defined by functions first
for fname, f in funcsym[state]:
line = f.__code__.co_firstlineno
file = f.__code__.co_filename
files[file] = None
tokname = toknames[fname]
ismethod = isinstance(f, types.MethodType)
if not optimize:
nargs = f.__code__.co_argcount
if ismethod:
reqargs = 2
else:
reqargs = 1
if nargs > reqargs:
print("%s:%d: Rule '%s' has too many arguments." % (file, line, f.__name__))
error = 1
continue
if nargs < reqargs:
print("%s:%d: Rule '%s' requires an argument." % (file, line, f.__name__))
error = 1
continue
if tokname == "ignore":
print(
"%s:%d: Rule '%s' must be defined as a string." % (file, line, f.__name__)
)
error = 1
continue
if tokname == "error":
errorf[state] = f
continue
if f.__doc__:
if not optimize:
try:
c = re.compile("(?P<%s>%s)" % (f.__name__, f.__doc__), re.VERBOSE | reflags)
if c.match(""):
print(
"%s:%d: Regular expression for rule '%s' matches empty string."
% (file, line, f.__name__)
)
error = 1
continue
except re.error as e:
print(
"%s:%d: Invalid regular expression for rule '%s'. %s"
% (file, line, f.__name__, e)
)
if "#" in f.__doc__:
print(
"%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'."
% (file, line, f.__name__)
)
error = 1
continue
if debug:
print(
"lex: Adding rule %s -> '%s' (state '%s')"
% (f.__name__, f.__doc__, state)
)
# Okay. The regular expression seemed okay. Let's append it to the master regular
# expression we're building
regex_list.append("(?P<%s>%s)" % (f.__name__, f.__doc__))
else:
print(
"%s:%d: No regular expression defined for rule '%s'" % (file, line, f.__name__)
)
# Now add all of the simple rules
for name, r in strsym[state]:
tokname = toknames[name]
if tokname == "ignore":
ignore[state] = r
continue
if not optimize:
if tokname == "error":
raise SyntaxError("lex: Rule '%s' must be defined as a function" % name)
error = 1
continue
if tokname not in lexobj.lextokens and tokname.find("ignore_") < 0:
print("lex: Rule '%s' defined for an unspecified token %s." % (name, tokname))
error = 1
continue
try:
c = re.compile("(?P<%s>%s)" % (name, r), re.VERBOSE | reflags)
if c.match(""):
print("lex: Regular expression for rule '%s' matches empty string." % name)
error = 1
continue
except re.error as e:
print("lex: Invalid regular expression for rule '%s'. %s" % (name, e))
if "#" in r:
print("lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name)
error = 1
continue
if debug:
print("lex: Adding rule %s -> '%s' (state '%s')" % (name, r, state))
regex_list.append("(?P<%s>%s)" % (name, r))
if not regex_list:
print("lex: No rules defined for state '%s'" % state)
error = 1
regexs[state] = regex_list
if not optimize:
for f in files.keys():
if not _validate_file(f):
error = 1
if error:
raise SyntaxError("lex: Unable to build lexer.")
# From this point forward, we're reasonably confident that we can build the lexer.
# No more errors will be generated, but there might be some warning messages.
# Build the master regular expressions
for state in regexs.keys():
lexre, re_text = _form_master_re(regexs[state], reflags, ldict)
lexobj.lexstatere[state] = lexre
lexobj.lexstateretext[state] = re_text
if debug:
for i in range(len(re_text)):
print("lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i]))
# For inclusive states, we need to add the INITIAL state
for state, type in stateinfo.items():
if state != "INITIAL" and type == "inclusive":
lexobj.lexstatere[state].extend(lexobj.lexstatere["INITIAL"])
lexobj.lexstateretext[state].extend(lexobj.lexstateretext["INITIAL"])
lexobj.lexstateinfo = stateinfo
lexobj.lexre = lexobj.lexstatere["INITIAL"]
lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
# Set up ignore variables
lexobj.lexstateignore = ignore
lexobj.lexignore = lexobj.lexstateignore.get("INITIAL", "")
# Set up error functions
lexobj.lexstateerrorf = errorf
lexobj.lexerrorf = errorf.get("INITIAL", None)
if warn and not lexobj.lexerrorf:
print("lex: Warning. no t_error rule is defined.")
# Check state information for ignore and error rules
for s, stype in stateinfo.items():
if stype == "exclusive":
if warn and s not in errorf:
print("lex: Warning. no error rule is defined for exclusive state '%s'" % s)
if warn and s not in ignore and lexobj.lexignore:
print("lex: Warning. no ignore rule is defined for exclusive state '%s'" % s)
elif stype == "inclusive":
if s not in errorf:
errorf[s] = errorf.get("INITIAL", None)
if s not in ignore:
ignore[s] = ignore.get("INITIAL", "")
# Create global versions of the token() and input() functions
token = lexobj.token
input = lexobj.input
lexer = lexobj
# If in optimize mode, we write the lextab
if lextab and optimize:
lexobj.writetab(lextab, outputdir)
return lexobj
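# -----------------------------------------------------------------------------
# Example usage (illustrative sketch only, not part of PLY). Token rules are
# placed on a throwaway module object so the example does not leak t_ rules
# into this file's globals.
# -----------------------------------------------------------------------------
def _lex_demo():
    mod = types.ModuleType("_lex_demo_rules")
    mod.tokens = ("NUMBER", "PLUS")
    mod.t_PLUS = r"\+"
    mod.t_ignore = " \t"

    def t_NUMBER(t):
        r"\d+"
        t.value = int(t.value)
        return t

    def t_error(t):
        print("Illegal character %r" % t.value[0])
        t.lexer.skip(1)

    mod.t_NUMBER = t_NUMBER
    mod.t_error = t_error
    demo_lexer = lex(module=mod)
    demo_lexer.input("3 + 14")
    while True:
        tok = demo_lexer.token()
        if not tok:
            break
        print(tok.type, tok.value)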
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
if not data:
try:
filename = sys.argv[1]
f = open(filename)
data = f.read()
f.close()
except IndexError:
print("Reading from standard input (type EOF to end):")
data = sys.stdin.read()
if lexer:
_input = lexer.input
else:
_input = input
_input(data)
if lexer:
_token = lexer.token
else:
_token = token
while True:
tok = _token()
if not tok:
break
print("(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
def set_doc(f):
f.__doc__ = r
return f
return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
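# Example (illustrative sketch, not part of PLY): building a rule's regular
# expression programmatically and attaching it with @TOKEN instead of writing
# it as a docstring.
def _token_demo():
    digit = r"([0-9])"
    nondigit = r"([_A-Za-z])"
    identifier = r"(" + nondigit + r"(" + digit + r"|" + nondigit + r")*)"

    @TOKEN(identifier)
    def t_ID(t):
        return t

    # The decorated function now carries the regex in its docstring.
    assert t_ID.__doc__ == identifier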
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
"""
A CaptioningRNN produces captions from image features using a recurrent
neural network.
The RNN receives input vectors of size D, has a vocab size of V, works on
sequences of length T, has an RNN hidden dimension of H, uses word vectors
of dimension W, and operates on minibatches of size N.
Note that we don't use any regularization for the CaptioningRNN.
"""
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
hidden_dim=128, cell_type='rnn', dtype=np.float32):
"""
Construct a new CaptioningRNN instance.
Inputs:
- word_to_idx: A dictionary giving the vocabulary. It contains V entries,
and maps each string to a unique integer in the range [0, V).
- input_dim: Dimension D of input image feature vectors.
- wordvec_dim: Dimension W of word vectors.
- hidden_dim: Dimension H for the hidden state of the RNN.
- cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
- dtype: numpy datatype to use; use float32 for training and float64 for
numeric gradient checking.
"""
if cell_type not in {'rnn', 'lstm'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.dtype = dtype
self.word_to_idx = word_to_idx
        self.idx_to_word = {i: w for w, i in word_to_idx.items()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx.get('<START>', None)
self._end = word_to_idx.get('<END>', None)
# Initialize word vectors
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
self.params['W_embed'] /= 100
# Initialize CNN -> hidden state projection parameters
self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
self.params['W_proj'] /= np.sqrt(input_dim)
self.params['b_proj'] = np.zeros(hidden_dim)
# Initialize parameters for the RNN
dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx'] /= np.sqrt(wordvec_dim)
self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh'] /= np.sqrt(hidden_dim)
self.params['b'] = np.zeros(dim_mul * hidden_dim)
# Initialize output to vocab weights
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
# Cast parameters to correct dtype
        for k, v in self.params.items():
self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
"""
Compute training-time loss for the RNN. We input image features and
ground-truth captions for those images, and use an RNN (or LSTM) to compute
loss and gradients on all parameters.
Inputs:
- features: Input image features, of shape (N, D)
- captions: Ground-truth captions; an integer array of shape (N, T) where
each element is in the range 0 <= y[i, t] < V
Returns a tuple of:
- loss: Scalar loss
- grads: Dictionary of gradients parallel to self.params
"""
# Cut captions into two pieces: captions_in has everything but the last word
# and will be input to the RNN; captions_out has everything but the first
# word and this is what we will expect the RNN to generate. These are offset
# by one relative to each other because the RNN should produce word (t+1)
# after receiving word t. The first element of captions_in will be the START
# token, and the first element of captions_out will be the first word.
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
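        # Concretely, if a caption is [<START>, 'a', 'cat', <END>], then
        # captions_in is [<START>, 'a', 'cat'] and captions_out is
        # ['a', 'cat', <END>]: at step t the RNN reads captions_in[:, t] and
        # is scored against captions_out[:, t].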
        # Mask selecting the positions where the target word is real (not
        # <NULL> padding); used to ignore padding in the softmax loss.
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the forward and backward passes for the CaptioningRNN. #
# In the forward pass you will need to do the following: #
# (1) Use an affine transformation to compute the initial hidden state #
# from the image features. This should produce an array of shape (N, H)#
# (2) Use a word embedding layer to transform the words in captions_in #
# from indices to vectors, giving an array of shape (N, T, W). #
# (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #
# process the sequence of input word vectors and produce hidden state #
# vectors for all timesteps, producing an array of shape (N, T, H). #
# (4) Use a (temporal) affine transformation to compute scores over the #
# vocabulary at every timestep using the hidden states, giving an #
# array of shape (N, T, V). #
# (5) Use (temporal) softmax to compute loss using captions_out, ignoring #
# the points where the output word is <NULL> using the mask above. #
# #
# In the backward pass you will need to compute the gradient of the loss #
# with respect to all model parameters. Use the loss and grads variables #
# defined above to store loss and gradients; grads[k] should give the #
# gradients for self.params[k]. #
############################################################################
# NOTE: FORWARD pass
N, D = features.shape
T = captions.shape[-1]
cache = {}
# N x H
h0, cache['affine'] = affine_forward(features, W_proj, b_proj)
# N x T x W
word_embed, cache['word_embedding'] = word_embedding_forward(captions_in, W_embed)
if self.cell_type == 'rnn':
# N x T x H
h, cache['rnn'] = rnn_forward(word_embed, h0, Wx, Wh, b)
elif self.cell_type == 'lstm':
h, cache['lstm'] = lstm_forward(word_embed, h0, Wx, Wh, b)
else:
            raise ValueError('cell type "%s" not implemented' % self.cell_type)
# N x T x V
scores, cache['temporal_affine'] = temporal_affine_forward(h, W_vocab, b_vocab)
loss, dscores = temporal_softmax_loss(scores, captions_out, mask)
# NOTE: BACKWARD pass
        dh, grads['W_vocab'], grads['b_vocab'] = temporal_affine_backward(
            dscores, cache['temporal_affine'])
if self.cell_type == 'rnn':
(dword_embed, dh0,
grads['Wx'], grads['Wh'], grads['b']) = rnn_backward(dh, cache['rnn'])
elif self.cell_type == 'lstm':
(dword_embed, dh0,
grads['Wx'], grads['Wh'], grads['b']) = lstm_backward(dh, cache['lstm'])
grads['W_embed'] = word_embedding_backward(
dword_embed, cache['word_embedding'])
_, grads['W_proj'], grads['b_proj'] = affine_backward(
dh0, cache['affine'])
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
def sample(self, features, max_length=30):
"""
Run a test-time forward pass for the model, sampling captions for input
feature vectors.
At each timestep, we embed the current word, pass it and the previous hidden
state to the RNN to get the next hidden state, use the hidden state to get
scores for all vocab words, and choose the word with the highest score as
the next word. The initial hidden state is computed by applying an affine
transform to the input image features, and the initial word is the <START>
token.
For LSTMs you will also have to keep track of the cell state; in that case
the initial cell state should be zero.
Inputs:
- features: Array of input image features of shape (N, D).
- max_length: Maximum length T of generated captions.
Returns:
- captions: Array of shape (N, max_length) giving sampled captions,
where each element is an integer in the range [0, V). The first element
of captions should be the first sampled word, not the <START> token.
"""
N = features.shape[0]
captions = self._null * np.ones((N, max_length), dtype=np.int32)
# Unpack parameters
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
W_embed = self.params['W_embed']
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
###########################################################################
# TODO: Implement test-time sampling for the model. You will need to #
# initialize the hidden state of the RNN by applying the learned affine #
# transform to the input image features. The first word that you feed to #
# the RNN should be the <START> token; its value is stored in the #
# variable self._start. At each timestep you will need to do to: #
# (1) Embed the previous word using the learned word embeddings #
# (2) Make an RNN step using the previous hidden state and the embedded #
# current word to get the next hidden state. #
# (3) Apply the learned affine transformation to the next hidden state to #
# get scores for all words in the vocabulary #
# (4) Select the word with the highest score as the next word, writing it #
# to the appropriate slot in the captions variable #
# #
# For simplicity, you do not need to stop generating after an <END> token #
# is sampled, but you can if you want to. #
# #
# HINT: You will not be able to use the rnn_forward or lstm_forward #
# functions; you'll need to call rnn_step_forward or lstm_step_forward in #
# a loop. #
###########################################################################
# N x 1
prev_words = self._start * np.ones((N, 1)).astype('int')
# N x H
h0, _ = affine_forward(features, W_proj, b_proj)
prev_h = h0
if self.cell_type == 'lstm':
prev_c = np.zeros_like(h0)
for t in range(max_length):
word_embed, _ = word_embedding_forward(prev_words, W_embed)
word_embed = word_embed.reshape((N, -1))
prev_h = prev_h.reshape((N, -1))
if self.cell_type == 'rnn':
h, _ = rnn_step_forward(word_embed, prev_h, Wx, Wh, b)
elif self.cell_type == 'lstm':
h, c, _ = lstm_step_forward(word_embed, prev_h, prev_c, Wx, Wh, b)
else:
                raise ValueError('cell type "%s" not implemented' % self.cell_type)
# h = h.reshape((N, 1, -1))
scores, _ = temporal_affine_forward(
h[:, np.newaxis, :], W_vocab, b_vocab)
# print 'scores shape', scores.shape
prev_words = np.argmax(scores, axis=-1)
# print prev_words.shape, captions[:, t].shape
captions[:, t] = prev_words.reshape((-1))
if self.cell_type == 'lstm':
prev_c = c
prev_h = h
############################################################################
# END OF YOUR CODE #
############################################################################
return captions
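def _captioning_rnn_demo():
    """Illustrative smoke test (not part of the assignment code): runs the
    model on tiny random data. Requires the cs231n layer implementations
    imported above; the toy vocabulary below is hypothetical."""
    word_to_idx = {'<NULL>': 0, '<START>': 1, '<END>': 2, 'cat': 3, 'dog': 4}
    N, D, T = 2, 64, 4
    model = CaptioningRNN(word_to_idx, input_dim=D, wordvec_dim=16,
                          hidden_dim=32, cell_type='rnn')
    features = np.random.randn(N, D)
    captions = np.random.randint(0, len(word_to_idx), size=(N, T))
    loss, grads = model.loss(features, captions)      # scalar loss, dict of grads
    sampled = model.sample(features, max_length=5)    # (N, 5) array of word ids
    return loss, sampled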
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for Blink.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import re
import sys
_EXCLUDED_PATHS = ()
def _CheckForVersionControlConflictsInFile(input_api, f):
pattern = input_api.re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
errors = []
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
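def _conflict_marker_demo():
  """Illustrative sketch only (not part of the presubmit API): shows what the
  conflict-marker pattern above does and does not match."""
  pattern = re.compile('^(?:<<<<<<<|>>>>>>>) |^=======$')
  assert pattern.match('<<<<<<< HEAD')
  assert pattern.match('>>>>>>> theirs')
  assert pattern.match('=======')
  assert not pattern.match('== not a conflict marker ==')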
def _CheckForVersionControlConflicts(input_api, output_api):
"""Usually this is not intentional and will cause a compile failure."""
errors = []
for f in input_api.AffectedFiles():
errors.extend(_CheckForVersionControlConflictsInFile(input_api, f))
results = []
if errors:
results.append(output_api.PresubmitError(
'Version control conflict markers found, please resolve.', errors))
return results
def _CheckWatchlist(input_api, output_api):
"""Check that the WATCHLIST file parses correctly."""
errors = []
for f in input_api.AffectedFiles():
if f.LocalPath() != 'WATCHLISTS':
continue
import StringIO
import logging
import watchlists
log_buffer = StringIO.StringIO()
log_handler = logging.StreamHandler(log_buffer)
log_handler.setFormatter(
logging.Formatter('%(levelname)s: %(message)s'))
logger = logging.getLogger()
logger.addHandler(log_handler)
wl = watchlists.Watchlists(input_api.change.RepositoryRoot())
logger.removeHandler(log_handler)
log_handler.flush()
log_buffer.flush()
if log_buffer.getvalue():
errors.append(output_api.PresubmitError(
'Cannot parse WATCHLISTS file, please resolve.',
log_buffer.getvalue().splitlines()))
return errors
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
# We should figure out what license checks we actually want to use.
license_header = r'.*'
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, excluded_paths=_EXCLUDED_PATHS,
maxlen=800, license_header=license_header))
results.extend(_CheckForVersionControlConflicts(input_api, output_api))
results.extend(_CheckPatchFiles(input_api, output_api))
results.extend(_CheckTestExpectations(input_api, output_api))
results.extend(_CheckChromiumPlatformMacros(input_api, output_api))
results.extend(_CheckWatchlist(input_api, output_api))
results.extend(_CheckFilePermissions(input_api, output_api))
return results
def _CheckPatchFiles(input_api, output_api):
problems = [f.LocalPath() for f in input_api.AffectedFiles()
if f.LocalPath().endswith(('.orig', '.rej'))]
if problems:
return [output_api.PresubmitError(
"Don't commit .rej and .orig files.", problems)]
else:
return []
def _CheckTestExpectations(input_api, output_api):
local_paths = [f.LocalPath() for f in input_api.AffectedFiles()]
if any('LayoutTests' in path for path in local_paths):
lint_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
'Tools', 'Scripts', 'lint-test-expectations')
_, errs = input_api.subprocess.Popen(
[input_api.python_executable, lint_path],
stdout=input_api.subprocess.PIPE,
stderr=input_api.subprocess.PIPE).communicate()
if not errs:
return [output_api.PresubmitError(
"lint-test-expectations failed "
"to produce output; check by hand. ")]
if errs.strip() != 'Lint succeeded.':
return [output_api.PresubmitError(errs)]
return []
def _CheckStyle(input_api, output_api):
# Files that follow Chromium's coding style do not include capital letters.
re_chromium_style_file = re.compile(r'\b[a-z_]+\.(cc|h)$')
style_checker_path = input_api.os_path.join(input_api.PresubmitLocalPath(),
'Tools', 'Scripts', 'check-webkit-style')
args = ([input_api.python_executable, style_checker_path, '--diff-files']
+ [input_api.os_path.join('..', '..', f.LocalPath())
for f in input_api.AffectedFiles()
# Filter out files that follow Chromium's coding style.
if not re_chromium_style_file.search(f.LocalPath())])
results = []
try:
child = input_api.subprocess.Popen(args,
stderr=input_api.subprocess.PIPE)
_, stderrdata = child.communicate()
if child.returncode != 0:
results.append(output_api.PresubmitError(
'check-webkit-style failed', [stderrdata]))
except Exception as e:
results.append(output_api.PresubmitNotifyResult(
'Could not run check-webkit-style', [str(e)]))
return results
def _CheckChromiumPlatformMacros(input_api, output_api, source_file_filter=None):
"""Ensures that Blink code uses WTF's platform macros instead of
Chromium's. Using the latter has resulted in at least one subtle
build breakage."""
os_macro_re = input_api.re.compile(r'^\s*#(el)?if.*\bOS_')
errors = input_api.canned_checks._FindNewViolationsOfRule(
lambda _, x: not os_macro_re.search(x),
input_api, source_file_filter)
errors = ['Found use of Chromium OS_* macro in %s. '
'Use WTF platform macros instead.' % violation for violation in errors]
if errors:
return [output_api.PresubmitPromptWarning('\n'.join(errors))]
return []
def _CheckForPrintfDebugging(input_api, output_api):
"""Generally speaking, we'd prefer not to land patches that printf
debug output."""
printf_re = input_api.re.compile(r'^\s*(printf\(|fprintf\(stderr,)')
errors = input_api.canned_checks._FindNewViolationsOfRule(
lambda _, x: not printf_re.search(x),
input_api, None)
errors = [' * %s' % violation for violation in errors]
if errors:
return [output_api.PresubmitPromptOrNotify(
'printf debugging is best debugging! That said, it might '
                'be a good idea to drop the following occurrences from '
'your patch before uploading:\n%s' % '\n'.join(errors))]
return []
def _CheckForFailInFile(input_api, f):
pattern = input_api.re.compile('^FAIL')
errors = []
for line_num, line in f.ChangedContents():
if pattern.match(line):
errors.append(' %s:%d %s' % (f.LocalPath(), line_num, line))
return errors
def _CheckFilePermissions(input_api, output_api):
"""Check that all files have their permissions properly set."""
if input_api.platform == 'win32':
return []
args = [input_api.python_executable,
input_api.os_path.join(
input_api.change.RepositoryRoot(),
'tools/checkperms/checkperms.py'),
'--root', input_api.change.RepositoryRoot()]
for f in input_api.AffectedFiles():
args += ['--file', f.LocalPath()]
try:
input_api.subprocess.check_output(args)
return []
except input_api.subprocess.CalledProcessError as error:
return [output_api.PresubmitError(
'checkperms.py failed:',
long_text=error.output)]
def _CheckForInvalidPreferenceError(input_api, output_api):
pattern = input_api.re.compile('Invalid name for preference: (.+)')
results = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('-expected.txt'):
continue
for line_num, line in f.ChangedContents():
error = pattern.search(line)
if error:
results.append(output_api.PresubmitError('Found an invalid preference %s in expected result %s:%s' % (error.group(1), f, line_num)))
return results
def _CheckForForbiddenNamespace(input_api, output_api):
"""Checks that Blink uses Chromium namespaces only in permitted code."""
# This list is not exhaustive, but covers likely ones.
chromium_namespaces = ["base", "cc", "content", "gfx", "net", "ui"]
chromium_classes = ["scoped_ptr", "scoped_refptr"]
def source_file_filter(path):
return input_api.FilterSourceFile(path,
white_list=[r'third_party/WebKit/Source/.*\.(h|cpp)$'],
black_list=[r'third_party/WebKit/Source/(platform|wtf|web)/'])
comment_re = input_api.re.compile(r'^\s*//')
result = []
for namespace in chromium_namespaces:
namespace_re = input_api.re.compile(r'\b{0}::|^\s*using namespace {0};|^\s*namespace {0} \{{'.format(input_api.re.escape(namespace)))
uses_namespace_outside_comments = lambda line: namespace_re.search(line) and not comment_re.search(line)
errors = input_api.canned_checks._FindNewViolationsOfRule(lambda _, line: not uses_namespace_outside_comments(line),
input_api, source_file_filter)
if errors:
result += [output_api.PresubmitError('Do not use Chromium namespace {} inside Blink core:\n{}'.format(namespace, '\n'.join(errors)))]
for class_name in chromium_classes:
class_re = input_api.re.compile(r'\b{0}\b'.format(input_api.re.escape(class_name)))
uses_class_outside_comments = lambda line: class_re.search(line) and not comment_re.search(line)
errors = input_api.canned_checks._FindNewViolationsOfRule(lambda _, line: not uses_class_outside_comments(line),
input_api, source_file_filter)
if errors:
result += [output_api.PresubmitError('Do not use Chromium class {} inside Blink core:\n{}'.format(class_name, '\n'.join(errors)))]
return result
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckStyle(input_api, output_api))
results.extend(_CheckForPrintfDebugging(input_api, output_api))
results.extend(_CheckForInvalidPreferenceError(input_api, output_api))
results.extend(_CheckForForbiddenNamespace(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
json_url='http://chromium-status.appspot.com/current?format=json'))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
return results
def GetPreferredTryMasters(project, change):
import json
import os.path
import platform
import subprocess
cq_config_path = os.path.join(
change.RepositoryRoot(), 'infra', 'config', 'cq.cfg')
# commit_queue.py below is a script in depot_tools directory, which has a
# 'builders' command to retrieve a list of CQ builders from the CQ config.
is_win = platform.system() == 'Windows'
masters = json.loads(subprocess.check_output(
['commit_queue', 'builders', cq_config_path], shell=is_win))
try_config = {}
for master in masters:
try_config.setdefault(master, {})
for builder in masters[master]:
# Do not trigger presubmit builders, since they're likely to fail
# (e.g. OWNERS checks before finished code review), and we're
# running local presubmit anyway.
if 'presubmit' not in builder:
try_config[master][builder] = ['defaulttests']
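  # Illustrative shape of the result (hypothetical master/builder names):
  #   {'tryserver.blink': {'linux_blink_rel': ['defaulttests'],
  #                        'mac_blink_rel': ['defaulttests']}}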
return try_config
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._disks_operations import build_create_or_update_request_initial, build_delete_request_initial, build_get_request, build_grant_access_request_initial, build_list_by_resource_group_request, build_list_request, build_revoke_access_request_initial, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DisksOperations:
"""DisksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2020_09_30.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.Disk",
**kwargs: Any
) -> "_models.Disk":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk, 'Disk')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Disk', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.Disk",
**kwargs: Any
) -> AsyncLROPoller["_models.Disk"]:
"""Creates or updates a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Put disk operation.
:type disk: ~azure.mgmt.compute.v2020_09_30.models.Disk
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.Disk]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.DiskUpdate",
**kwargs: Any
) -> "_models.Disk":
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(disk, 'DiskUpdate')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('Disk', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
disk_name: str,
disk: "_models.DiskUpdate",
**kwargs: Any
) -> AsyncLROPoller["_models.Disk"]:
"""Updates (patches) a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param disk: Disk object supplied in the body of the Patch disk operation.
:type disk: ~azure.mgmt.compute.v2020_09_30.models.DiskUpdate
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either Disk or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.Disk]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
disk=disk,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> "_models.Disk":
"""Gets information about a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Disk, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2020_09_30.models.Disk
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Disk"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Disk', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}'} # type: ignore
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.DiskList"]:
"""Lists all the disks under a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_09_30.models.DiskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DiskList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks'} # type: ignore
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.DiskList"]:
"""Lists all the disks under a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DiskList or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2020_09_30.models.DiskList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DiskList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("DiskList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/disks'} # type: ignore
async def _grant_access_initial(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs: Any
) -> Optional["_models.AccessUri"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.AccessUri"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(grant_access_data, 'GrantAccessData')
request = build_grant_access_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
content_type=content_type,
json=_json,
template_url=self._grant_access_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_grant_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'} # type: ignore
@distributed_trace_async
async def begin_grant_access(
self,
resource_group_name: str,
disk_name: str,
grant_access_data: "_models.GrantAccessData",
**kwargs: Any
) -> AsyncLROPoller["_models.AccessUri"]:
"""Grants access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:param grant_access_data: Access data object supplied in the body of the get disk access
operation.
:type grant_access_data: ~azure.mgmt.compute.v2020_09_30.models.GrantAccessData
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either AccessUri or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2020_09_30.models.AccessUri]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.AccessUri"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._grant_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
grant_access_data=grant_access_data,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('AccessUri', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_grant_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/beginGetAccess'} # type: ignore
async def _revoke_access_initial(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_revoke_access_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
disk_name=disk_name,
template_url=self._revoke_access_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_revoke_access_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} # type: ignore
@distributed_trace_async
async def begin_revoke_access(
self,
resource_group_name: str,
disk_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Revokes access to a disk.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param disk_name: The name of the managed disk that is being created. The name can't be changed
after the disk is created. Supported characters for the name are a-z, A-Z, 0-9 and _. The
maximum name length is 80 characters.
:type disk_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._revoke_access_initial(
resource_group_name=resource_group_name,
disk_name=disk_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revoke_access.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/disks/{diskName}/endGetAccess'} # type: ignore
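# --- Illustrative usage sketch (not part of the generated client code above).
# It shows how the AsyncLROPoller returned by begin_revoke_access is typically
# driven, including the continuation-token restart described in the docstring.
# The `client` object (e.g. a ComputeManagementClient) and the resource names
# are caller-supplied assumptions, not values taken from this file.
async def _example_revoke_and_resume(client, resource_group_name, disk_name):
    poller = await client.disks.begin_revoke_access(
        resource_group_name=resource_group_name,
        disk_name=disk_name,
    )
    # A poller can be checkpointed; the token can be persisted and used later.
    token = poller.continuation_token()
    resumed = await client.disks.begin_revoke_access(
        resource_group_name=resource_group_name,
        disk_name=disk_name,
        continuation_token=token,  # resume polling, skipping the initial call
    )
    await resumed.result()  # waits (asynchronously) until the LRO completes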
| |
import unittest
import zen
class FlowTestCase(unittest.TestCase):
def test_min_cut(self):
#sample graph
G = zen.DiGraph()
G.add_node('a')
G.add_node('b')
G.add_node('c')
G.add_node('d')
G.add_node('e')
G.add_node('f')
G.add_node('g')
G.add_node('h')
G.add_edge('a','b',weight=10)
G.add_edge('a','c',weight=5)
G.add_edge('a','d',weight=15)
G.add_edge('b','e',weight=9)
G.add_edge('b','f',weight=15)
G.add_edge('b','c',weight=4)
G.add_edge('c','f',weight=8)
G.add_edge('c','d',weight=4)
G.add_edge('d','g',weight=30)
G.add_edge('g','c',weight=6)
G.add_edge('e','f',weight=15)
G.add_edge('e','h',weight=10)
G.add_edge('f','g',weight=15)
G.add_edge('f','h',weight=10)
G.add_edge('g','h',weight=10)
self.assertEquals(28, zen.min_cut(G,'a','h','weight'))
self.assertEquals(3, zen.min_cut(G,'a','h','unit'))
G.set_weight('d','g', float('inf'))
self.assertEquals(28, zen.min_cut_(G,0,7,'weight'))
self.assertEquals(3, zen.min_cut_(G,0,7,'unit'))
G.set_weight('a','c', float('inf'))
G.set_weight('c','f', float('inf'))
G.set_weight('f','h', float('inf'))
self.assertEquals(float('inf'), zen.min_cut(G,'a','h','weight'))
self.assertEquals(3, zen.min_cut(G,'a','h','unit'))
def test_min_cut_(self):
#sample graph
G = zen.DiGraph()
G.add_node('a')
G.add_node('b')
G.add_node('c')
G.add_node('d')
G.add_node('e')
G.add_node('f')
G.add_node('g')
G.add_node('h')
G.add_edge('a','b',weight=10)
G.add_edge('a','c',weight=5)
G.add_edge('a','d',weight=15)
G.add_edge('b','e',weight=9)
G.add_edge('b','f',weight=15)
G.add_edge('b','c',weight=4)
G.add_edge('c','f',weight=8)
G.add_edge('c','d',weight=4)
G.add_edge('d','g',weight=30)
G.add_edge('g','c',weight=6)
G.add_edge('e','f',weight=15)
G.add_edge('e','h',weight=10)
G.add_edge('f','g',weight=15)
G.add_edge('f','h',weight=10)
G.add_edge('g','h',weight=10)
self.assertEquals(28, zen.min_cut_(G,0,7,'weight'))
self.assertEquals(3, zen.min_cut_(G,0,7,'unit'))
G.set_weight('d','g', float('inf'))
self.assertEquals(28, zen.min_cut_(G,0,7,'weight'))
self.assertEquals(3, zen.min_cut_(G,0,7,'unit'))
G.set_weight('a','c', float('inf'))
G.set_weight('c','f', float('inf'))
G.set_weight('f','h', float('inf'))
self.assertEquals(float('inf'), zen.min_cut(G,'a','h','weight'))
self.assertEquals(3, zen.min_cut(G,'a','h','unit'))
def test_min_cut_set(self):
#sample graph
G = zen.DiGraph()
G.add_node('a')
G.add_node('b')
G.add_node('c')
G.add_node('d')
G.add_node('e')
G.add_node('f')
G.add_node('g')
G.add_node('h')
G.add_edge('a','b',weight=10)
G.add_edge('a','c',weight=5)
G.add_edge('a','d',weight=15)
G.add_edge('b','e',weight=9)
G.add_edge('b','f',weight=15)
G.add_edge('b','c',weight=4)
G.add_edge('c','f',weight=8)
G.add_edge('c','d',weight=4)
G.add_edge('d','g',weight=30)
G.add_edge('g','c',weight=6)
G.add_edge('e','f',weight=15)
G.add_edge('e','h',weight=10)
G.add_edge('f','g',weight=15)
G.add_edge('f','h',weight=10)
G.add_edge('g','h',weight=10)
cut_set = zen.min_cut_set(G,'a','h','weight')
self.assertEquals(3, len(cut_set))
self.assertTrue(('a','b') in cut_set)
self.assertTrue(('c','f') in cut_set)
self.assertTrue(('g','h') in cut_set)
cut_set = zen.min_cut_set(G,'a','h','unit')
self.assertEquals(3, len(cut_set))
self.assertTrue(('a','b') in cut_set)
self.assertTrue(('a','c') in cut_set)
self.assertTrue(('a','d') in cut_set)
G.set_weight('d','g', float('inf'))
cut_set = zen.min_cut_set(G,'a','h','weight')
self.assertEquals(3, len(cut_set))
self.assertTrue(('a','b') in cut_set)
self.assertTrue(('c','f') in cut_set)
self.assertTrue(('g','h') in cut_set)
cut_set = zen.min_cut_set(G,'a','h','unit')
self.assertEquals(3, len(cut_set))
self.assertTrue(('a','b') in cut_set)
self.assertTrue(('a','c') in cut_set)
self.assertTrue(('a','d') in cut_set)
G.set_weight('a','c', float('inf'))
G.set_weight('c','f', float('inf'))
G.set_weight('f','h', float('inf'))
cut_set = zen.min_cut_set(G,'a','h','weight')
self.assertEquals(3, len(cut_set))
self.assertTrue(('a','b') in cut_set)
self.assertTrue(('a','c') in cut_set)
self.assertTrue(('a','d') in cut_set)
cut_set = zen.min_cut_set(G,'a','h','unit')
self.assertEquals(3, len(cut_set))
self.assertTrue(('a','b') in cut_set)
self.assertTrue(('a','c') in cut_set)
self.assertTrue(('a','d') in cut_set)
def test_min_cut_set_(self):
#sample graph
G = zen.DiGraph()
G.add_node('a') #node 0
G.add_node('b')
G.add_node('c')
G.add_node('d')
G.add_node('e') #node 4
G.add_node('f')
G.add_node('g')
G.add_node('h') #node 7
G.add_edge('a','b',weight=10) #edge 0
G.add_edge('a','c',weight=5)
G.add_edge('a','d',weight=15)
G.add_edge('b','e',weight=9)
G.add_edge('b','f',weight=15)
G.add_edge('b','c',weight=4) #edge 5
G.add_edge('c','f',weight=8)
G.add_edge('c','d',weight=4)
G.add_edge('d','g',weight=30)
G.add_edge('g','c',weight=6)
G.add_edge('e','f',weight=15) #edge 10
G.add_edge('e','h',weight=10)
G.add_edge('f','g',weight=15)
G.add_edge('f','h',weight=10)
G.add_edge('g','h',weight=10) #edge 14
cut_set = zen.min_cut_set_(G,0,7,'weight')
self.assertEquals(3, len(cut_set))
self.assertTrue(0 in cut_set)
self.assertTrue(6 in cut_set)
self.assertTrue(14 in cut_set)
cut_set = zen.min_cut_set_(G,0,7,'unit')
self.assertEquals(3, len(cut_set))
self.assertTrue(0 in cut_set)
self.assertTrue(1 in cut_set)
self.assertTrue(2 in cut_set)
G.set_weight('d','g', float('inf'))
cut_set = zen.min_cut_set_(G,0,7,'weight')
self.assertEquals(3, len(cut_set))
self.assertTrue(0 in cut_set)
self.assertTrue(6 in cut_set)
self.assertTrue(14 in cut_set)
cut_set = zen.min_cut_set_(G,0,7,'unit')
self.assertEquals(3, len(cut_set))
self.assertTrue(0 in cut_set)
self.assertTrue(1 in cut_set)
self.assertTrue(2 in cut_set)
G.set_weight('a','c', float('inf'))
G.set_weight('c','f', float('inf'))
G.set_weight('f','h', float('inf'))
cut_set = zen.min_cut_set_(G,0,7,'weight')
self.assertEquals(3, len(cut_set))
self.assertTrue(0 in cut_set)
self.assertTrue(1 in cut_set)
self.assertTrue(2 in cut_set)
cut_set = zen.min_cut_set_(G,0,7,'unit')
self.assertEquals(3, len(cut_set))
self.assertTrue(0 in cut_set)
self.assertTrue(1 in cut_set)
self.assertTrue(2 in cut_set)
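# --- Independent cross-check (illustrative; not part of zen's test suite) ---
# By max-flow/min-cut duality, the maximum a->h flow in the sample graph above
# equals the minimum cut weight the tests assert (28). This dependency-free
# Edmonds-Karp sketch can be run by hand to confirm that value; it is a sketch
# under the stated assumptions, not zen's own implementation. (Note zen's
# convention, visible above: trailing-underscore variants such as min_cut_
# take node/edge indices instead of node objects.)
from collections import deque

def _edmonds_karp(capacity, source, sink):
    """Return the maximum flow from source to sink; capacity is {u: {v: cap}}."""
    residual = {u: dict(nbrs) for u, nbrs in capacity.items()}
    for u, nbrs in capacity.items():
        for v in nbrs:
            residual.setdefault(v, {})
            residual[v].setdefault(u, 0)  # reverse edges for the residual graph
    flow = 0
    while True:
        # BFS for a shortest augmenting path in the residual graph.
        parent = {source: None}
        queue = deque([source])
        while queue and sink not in parent:
            u = queue.popleft()
            for v, cap in residual[u].items():
                if cap > 0 and v not in parent:
                    parent[v] = u
                    queue.append(v)
        if sink not in parent:
            return flow  # no augmenting path left: the flow is maximal
        # Find the bottleneck along the path, then push that much flow.
        bottleneck, v = float('inf'), sink
        while parent[v] is not None:
            bottleneck = min(bottleneck, residual[parent[v]][v])
            v = parent[v]
        v = sink
        while parent[v] is not None:
            residual[parent[v]][v] -= bottleneck
            residual[v][parent[v]] += bottleneck
            v = parent[v]
        flow += bottleneck

def _cross_check_min_cut():
    caps = {'a': {'b': 10, 'c': 5, 'd': 15}, 'b': {'e': 9, 'f': 15, 'c': 4},
            'c': {'f': 8, 'd': 4}, 'd': {'g': 30}, 'e': {'f': 15, 'h': 10},
            'f': {'g': 15, 'h': 10}, 'g': {'c': 6, 'h': 10}, 'h': {}}
    assert _edmonds_karp(caps, 'a', 'h') == 28  # matches the assertions above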
if __name__ == '__main__':
unittest.main()
| |
import os
from unittest.mock import patch
import pytest
from PIL import Image as PILImage
from sigal import init_logging
from sigal.gallery import Image
from sigal.image import (
generate_image,
generate_thumbnail,
get_exif_data,
get_exif_tags,
get_iptc_data,
get_image_metadata,
get_size,
process_image,
)
from sigal.settings import Status, create_settings
CURRENT_DIR = os.path.dirname(__file__)
SRCDIR = os.path.join(CURRENT_DIR, 'sample', 'pictures')
TEST_IMAGE = 'KeckObservatory20071020.jpg'
SRCFILE = os.path.join(SRCDIR, 'dir2', TEST_IMAGE)
TEST_GIF_IMAGE = 'example.gif'
SRC_GIF_FILE = os.path.join(SRCDIR, 'dir1', 'test1', TEST_GIF_IMAGE)
def test_process_image(tmpdir):
"Test the process_image function."
status = process_image(Image('foo.txt', 'bar', create_settings()))
assert status == Status.FAILURE
settings = create_settings(
img_processor='ResizeToFill',
make_thumbs=False,
source=os.path.join(SRCDIR, 'dir2'),
destination=str(tmpdir),
)
image = Image(TEST_IMAGE, '.', settings)
status = process_image(image)
assert status == Status.SUCCESS
im = PILImage.open(os.path.join(str(tmpdir), TEST_IMAGE))
assert im.size == settings['img_size']
def test_generate_image(tmpdir):
"Test the generate_image function."
dstfile = str(tmpdir.join(TEST_IMAGE))
for i, size in enumerate([(600, 600), (300, 200)]):
settings = create_settings(
img_size=size, img_processor='ResizeToFill', copy_exif_data=True
)
options = None if i == 0 else {'quality': 85}
generate_image(SRCFILE, dstfile, settings, options=options)
im = PILImage.open(dstfile)
assert im.size == size
def test_generate_image_imgformat(tmpdir):
"Test the effects of the img_format setting on generate_image."
dstfile = str(tmpdir.join(TEST_IMAGE))
for i, outfmt in enumerate(["JPEG", "PNG", "TIFF"]):
settings = create_settings(
img_size=(300, 300),
img_processor='ResizeToFill',
copy_exif_data=True,
img_format=outfmt,
)
options = {'quality': 85}
generate_image(SRCFILE, dstfile, settings, options=options)
im = PILImage.open(dstfile)
assert im.format == outfmt
def test_resize_image_portrait(tmpdir):
"""Test that the area is the same regardless of aspect ratio."""
size = (300, 200)
settings = create_settings(img_size=size)
portrait_image = 'm57_the_ring_nebula-587px.jpg'
portrait_src = os.path.join(
CURRENT_DIR, 'sample', 'pictures', 'dir2', portrait_image
)
portrait_dst = str(tmpdir.join(portrait_image))
generate_image(portrait_src, portrait_dst, settings)
im = PILImage.open(portrait_dst)
# In the default mode, PILKit resizes so that neither dimension of the result
# is smaller than the requested length; the other dimension is scaled to keep
# the aspect ratio. Hence we check that the image's shorter side equals the
# smaller requested length.
assert im.size[0] == 200
landscape_image = 'KeckObservatory20071020.jpg'
landscape_src = os.path.join(
CURRENT_DIR, 'sample', 'pictures', 'dir2', landscape_image
)
landscape_dst = str(tmpdir.join(landscape_image))
generate_image(landscape_src, landscape_dst, settings)
im = PILImage.open(landscape_dst)
assert im.size[1] == 200
@pytest.mark.parametrize(
("image", "path"), [(TEST_IMAGE, SRCFILE), (TEST_GIF_IMAGE, SRC_GIF_FILE)]
)
def test_generate_image_passthrough(tmpdir, image, path):
"Test the generate_image function with use_orig=True."
dstfile = str(tmpdir.join(image))
settings = create_settings(use_orig=True)
generate_image(path, dstfile, settings)
# Check the file was copied, not (sym)linked
st_src = os.stat(path)
st_dst = os.stat(dstfile)
assert st_src.st_size == st_dst.st_size
assert not os.path.samestat(st_src, st_dst)
def test_generate_image_passthrough_symlink(tmpdir):
"Test the generate_image function with use_orig=True and orig_link=True."
dstfile = str(tmpdir.join(TEST_IMAGE))
settings = create_settings(use_orig=True, orig_link=True)
generate_image(SRCFILE, dstfile, settings)
# Check the file was symlinked
assert os.path.islink(dstfile)
assert os.path.samefile(SRCFILE, dstfile)
def test_generate_image_processor(tmpdir):
"Test generate_image with a wrong processor name."
init_logging('sigal')
dstfile = str(tmpdir.join(TEST_IMAGE))
settings = create_settings(img_size=(200, 200), img_processor='WrongMethod')
with pytest.raises(SystemExit):
generate_image(SRCFILE, dstfile, settings)
@pytest.mark.parametrize(
("image", "path", "wide_size", "high_size"),
[
(TEST_IMAGE, SRCFILE, (200, 133), (150, 100)),
(TEST_GIF_IMAGE, SRC_GIF_FILE, (134, 150), (150, 168)),
],
)
def test_generate_thumbnail(tmpdir, image, path, wide_size, high_size):
"Test the generate_thumbnail function."
dstfile = str(tmpdir.join(image))
for size in [(200, 150), (150, 200)]:
generate_thumbnail(path, dstfile, size)
im = PILImage.open(dstfile)
assert im.size == size
for size, thumb_size in [((200, 150), wide_size), ((150, 200), high_size)]:
generate_thumbnail(path, dstfile, size, fit=False)
im = PILImage.open(dstfile)
assert im.size == thumb_size
def test_get_exif_tags():
test_image = '11.jpg'
src_file = os.path.join(
CURRENT_DIR, 'sample', 'pictures', 'dir1', 'test1', test_image
)
data = get_exif_data(src_file)
simple = get_exif_tags(data, datetime_format='%d/%m/%Y')
assert simple['fstop'] == 3.9
assert simple['focal'] == 12.0
assert simple['iso'] == 50
assert simple['Make'] == 'NIKON'
assert simple['datetime'] == '22/01/2006'
try:
# Pillow 7.2+
assert simple['exposure'] == '0.00100603'
except Exception:
assert simple['exposure'] == '100603/100000000'
data = {'FNumber': [1, 0], 'FocalLength': [1, 0], 'ExposureTime': 10}
simple = get_exif_tags(data)
assert 'fstop' not in simple
assert 'focal' not in simple
assert simple['exposure'] == '10'
data = {
'ExposureTime': '--',
'DateTimeOriginal': '---',
'GPSInfo': {
'GPSLatitude': ((34, 0), (1, 0), (4500, 100)),
'GPSLatitudeRef': 'N',
'GPSLongitude': ((116, 0), (8, 0), (3900, 100)),
'GPSLongitudeRef': 'W',
},
}
simple = get_exif_tags(data)
assert 'exposure' not in simple
assert 'datetime' not in simple
assert 'gps' not in simple
def test_get_iptc_data(caplog):
test_image = '1.jpg'
src_file = os.path.join(CURRENT_DIR, 'sample', 'pictures', 'iptcTest', test_image)
data = get_iptc_data(src_file)
# Title
assert (
data["title"]
== 'Haemostratulus clouds over Canberra - ' + '2005-12-28 at 03-25-07'
)
# Description
assert (
data["description"]
== '"Haemo" because they look like haemoglobin '
+ 'cells and "stratulus" because I can\'t work out whether '
+ 'they\'re Stratus or Cumulus clouds.\nWe\'re driving down '
+ 'the main drag in Canberra so it\'s Parliament House that '
+ 'you can see at the end of the road.'
)
# This file has no IPTC data
test_image = '21.jpg'
src_file = os.path.join(CURRENT_DIR, 'sample', 'pictures', 'exifTest', test_image)
assert get_iptc_data(src_file) == {}
# Headline
test_image = '3.jpg'
src_file = os.path.join(CURRENT_DIR, 'sample', 'pictures', 'iptcTest', test_image)
data = get_iptc_data(src_file)
assert data["headline"] == 'Ring Nebula, M57'
# Test catching the SyntaxError -- assert output
with patch('sigal.image.IptcImagePlugin.getiptcinfo', side_effect=SyntaxError):
get_iptc_data(src_file)
assert ['IPTC Error in'] == [log.message[:13] for log in caplog.records]
def test_get_image_metadata_exceptions():
# image does not exist
test_image = 'bad_image.jpg'
src_file = os.path.join(CURRENT_DIR, 'sample', test_image)
data = get_image_metadata(src_file)
assert data == {'exif': {}, 'iptc': {}, 'size': {}}
def test_iso_speed_ratings():
data = {'ISOSpeedRatings': ()}
simple = get_exif_tags(data)
assert 'iso' not in simple
data = {'ISOSpeedRatings': None}
simple = get_exif_tags(data)
assert 'iso' not in simple
data = {'ISOSpeedRatings': 125}
simple = get_exif_tags(data)
assert 'iso' in simple
def test_null_exposure_time():
data = {'ExposureTime': (0, 0)}
simple = get_exif_tags(data)
assert 'exposure' not in simple
def test_exif_copy(tmpdir):
"Test if EXIF data can transferred copied to the resized image."
test_image = '11.jpg'
src_file = os.path.join(
CURRENT_DIR, 'sample', 'pictures', 'dir1', 'test1', test_image
)
dst_file = str(tmpdir.join(test_image))
settings = create_settings(img_size=(300, 400), copy_exif_data=True)
generate_image(src_file, dst_file, settings)
simple = get_exif_tags(get_exif_data(dst_file))
assert simple['iso'] == 50
settings['copy_exif_data'] = False
generate_image(src_file, dst_file, settings)
simple = get_exif_tags(get_exif_data(dst_file))
assert not simple
def test_exif_gps(tmpdir):
"""Test reading out correct geo tags"""
test_image = 'flickr_jerquiaga_2394751088_cc-by-nc.jpg'
src_file = os.path.join(
CURRENT_DIR, 'sample', 'pictures', 'dir1', 'test1', test_image
)
dst_file = str(tmpdir.join(test_image))
settings = create_settings(img_size=(400, 300), copy_exif_data=True)
generate_image(src_file, dst_file, settings)
simple = get_exif_tags(get_exif_data(dst_file))
assert 'gps' in simple
lat = 34.029167
lon = -116.144167
assert abs(simple['gps']['lat'] - lat) < 0.0001
assert abs(simple['gps']['lon'] - lon) < 0.0001
def test_get_size(tmpdir):
"""Test reading out image size"""
test_image = 'flickr_jerquiaga_2394751088_cc-by-nc.jpg'
src_file = os.path.join(
CURRENT_DIR, 'sample', 'pictures', 'dir1', 'test1', test_image
)
result = get_size(src_file)
assert result == {'height': 800, 'width': 600}
def test_get_size_with_invalid_path(tmpdir):
"""Test reading out image size with a missing file"""
test_image = 'missing-file.jpg'
src_file = os.path.join(CURRENT_DIR, test_image)
result = get_size(src_file)
assert result is None
| |
"""
Various data structures used in query construction.
Factored out from django.db.models.query to avoid making the main module very
large and/or so that they can be used by other modules without getting into
circular import difficulties.
"""
from __future__ import unicode_literals
import inspect
from collections import namedtuple
from django.core.exceptions import FieldDoesNotExist
from django.db.models.constants import LOOKUP_SEP
from django.utils import tree
from django.utils.lru_cache import lru_cache
# PathInfo is used when converting lookups (fk__somecol). The contents
# describe the relation in Model terms (model Options and Fields for both
# sides of the relation). The join_field is the field backing the relation.
PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2m direct')
class InvalidQuery(Exception):
"""
Raised when the query passed to raw() isn't safe to use with raw().
"""
pass
def subclasses(cls):
yield cls
# Python 2 lacks 'yield from', which could replace the inner loop
for subclass in cls.__subclasses__():
# yield from subclasses(subclass)
for item in subclasses(subclass):
yield item
class QueryWrapper(object):
"""
A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
contains_aggregate = False
def __init__(self, sql, params):
self.data = sql, list(params)
def as_sql(self, compiler=None, connection=None):
return self.data
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + list(kwargs.items()))
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = type(self)()
obj.connector = conn
obj.add(self, conn)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = type(self)()
obj.add(self, self.AND)
obj.negate()
return obj
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# We must promote any new joins to left outer joins so that when Q is
# used as an expression, rows aren't filtered due to joins.
clause, joins = query._add_q(self, reuse, allow_joins=allow_joins, split_subq=False)
query.promote_joins(joins)
return clause
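# Illustrative usage (assumed; not part of this module). Q objects build a
# tree of conditions that the ORM later resolves into a SQL WHERE clause:
#
#   from django.db.models import Q
#   # hypothetical Entry model with `headline` and `pub_year` fields
#   q = (Q(headline__startswith='Hello') | Q(headline__contains='bye')) & ~Q(pub_year=2015)
#   Entry.objects.filter(q)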
class DeferredAttribute(object):
"""
A wrapper for a deferred-loading field. When the value is read from this
object the first time, the query is executed.
"""
def __init__(self, field_name, model):
self.field_name = field_name
def __get__(self, instance, cls=None):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
"""
if instance is None:
return self
opts = instance._meta
data = instance.__dict__
if data.get(self.field_name, self) is self:
# self.field_name is the attname of the field, but only() takes the
# actual name, so we need to translate it here.
try:
f = opts.get_field(self.field_name)
except FieldDoesNotExist:
f = [f for f in opts.fields if f.attname == self.field_name][0]
name = f.name
# Let's see if the field is part of the parent chain. If so we
# might be able to reuse the already loaded value. Refs #18343.
val = self._check_parent_chain(instance, name)
if val is None:
instance.refresh_from_db(fields=[self.field_name])
val = getattr(instance, self.field_name)
data[self.field_name] = val
return data[self.field_name]
def _check_parent_chain(self, instance, name):
"""
Check if the field value can be fetched from a parent field already
loaded in the instance. This can be done if the to-be-fetched
field is a primary key field.
"""
opts = instance._meta
f = opts.get_field(name)
link_field = opts.get_ancestor_link(f.model)
if f.primary_key and f != link_field:
return getattr(instance, link_field.attname)
return None
class RegisterLookupMixin(object):
@classmethod
def _get_lookup(cls, lookup_name):
return cls.get_lookups().get(lookup_name, None)
@classmethod
@lru_cache(maxsize=None)
def get_lookups(cls):
class_lookups = [parent.__dict__.get('class_lookups', {}) for parent in inspect.getmro(cls)]
return cls.merge_dicts(class_lookups)
def get_lookup(self, lookup_name):
from django.db.models.lookups import Lookup
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_lookup(lookup_name)
if found is not None and not issubclass(found, Lookup):
return None
return found
def get_transform(self, lookup_name):
from django.db.models.lookups import Transform
found = self._get_lookup(lookup_name)
if found is None and hasattr(self, 'output_field'):
return self.output_field.get_transform(lookup_name)
if found is not None and not issubclass(found, Transform):
return None
return found
@staticmethod
def merge_dicts(dicts):
"""
Merge dicts in reverse to give precedence to the order of the original
list. e.g., merge_dicts([a, b]) will prefer the keys in 'a' over those in 'b'.
"""
merged = {}
for d in reversed(dicts):
merged.update(d)
return merged
@classmethod
def _clear_cached_lookups(cls):
for subclass in subclasses(cls):
subclass.get_lookups.cache_clear()
@classmethod
def register_lookup(cls, lookup, lookup_name=None):
if lookup_name is None:
lookup_name = lookup.lookup_name
if 'class_lookups' not in cls.__dict__:
cls.class_lookups = {}
cls.class_lookups[lookup_name] = lookup
cls._clear_cached_lookups()
return lookup
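# Illustrative usage (assumed; not part of this module): Field classes mix in
# RegisterLookupMixin, so a custom lookup can be registered like this:
#
#   from django.db.models import CharField
#   from django.db.models.lookups import Lookup
#
#   class NotEqual(Lookup):
#       lookup_name = 'ne'
#       def as_sql(self, compiler, connection):
#           lhs, lhs_params = self.process_lhs(compiler, connection)
#           rhs, rhs_params = self.process_rhs(compiler, connection)
#           return '%s <> %s' % (lhs, rhs), lhs_params + rhs_params
#
#   CharField.register_lookup(NotEqual)  # enables .filter(name__ne='x')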
@classmethod
def _unregister_lookup(cls, lookup, lookup_name=None):
"""
Remove given lookup from cls lookups. For use in tests only as it's
not thread-safe.
"""
if lookup_name is None:
lookup_name = lookup.lookup_name
del cls.class_lookups[lookup_name]
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Returns True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
Arguments:
* field - the field to be checked
* restricted - a boolean, indicating whether the field list has been
manually restricted using a requested clause
* requested - The select_related() dictionary.
* load_fields - the set of fields to be loaded on this model
* reverse - boolean, True if we are checking a reverse select related
"""
if not field.remote_field:
return False
if field.remote_field.parent_link and not reverse:
return False
if restricted:
if reverse and field.related_query_name() not in requested:
return False
if not reverse and field.name not in requested:
return False
if not restricted and field.null:
return False
if load_fields:
if field.attname not in load_fields:
if restricted and field.name in requested:
raise InvalidQuery("Field %s.%s cannot be both deferred"
" and traversed using select_related"
" at the same time." %
(field.model._meta.object_name, field.name))
return True
def refs_expression(lookup_parts, annotations):
"""
A helper method to check if the lookup_parts contains references
to the given annotations set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])
if level_n_lookup in annotations and annotations[level_n_lookup]:
return annotations[level_n_lookup], lookup_parts[n:]
return False, ()
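# Worked example (illustrative): given annotations = {'total': <expr>} and
# lookup_parts = ['total', 'gt'], the prefix scan matches 'total' at n == 1
# and returns (annotations['total'], ['gt']); with no matching prefix the
# loop falls through and the function returns (False, ()).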
def check_rel_lookup_compatibility(model, target_opts, field):
"""
Check that self.model is compatible with target_opts. Compatibility
is OK if:
1) model and opts match (where proxy inheritance is removed)
2) model is parent of opts' model or the other way around
"""
def check(opts):
return (
model._meta.concrete_model == opts.concrete_model or
opts.concrete_model in model._meta.get_parent_list() or
model in opts.get_parent_list()
)
# If the field is a primary key, then doing a query against the field's
# model is ok, too. Consider the case:
# class Restaurant(models.Model):
# place = OnetoOneField(Place, primary_key=True):
# Restaurant.objects.filter(pk__in=Restaurant.objects.all()).
# If we didn't have the primary key check, then pk__in (== place__in) would
# give Place's opts as the target opts, but Restaurant isn't compatible
# with that. This logic applies only to primary keys, as when doing __in=qs,
# we are going to turn this into __in=qs.values('pk') later on.
return (
check(target_opts) or
(getattr(field, 'primary_key', False) and check(field.model._meta))
)
| |
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, assert_, assert_raises, assert_equal, assert_warns,
assert_no_warnings, assert_array_equal, assert_array_almost_equal,
suppress_warnings
)
from numpy import random
import sys
class TestSeed(object):
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
s = np.random.RandomState(4294967295)
assert_equal(s.randint(1000), 419)
def test_array(self):
s = np.random.RandomState(range(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState(np.arange(10))
assert_equal(s.randint(1000), 468)
s = np.random.RandomState([0])
assert_equal(s.randint(1000), 973)
s = np.random.RandomState([4294967295])
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
# seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
assert_raises(ValueError, np.random.RandomState, [1, 2, 4294967296])
assert_raises(ValueError, np.random.RandomState, [1, -2, 4294967296])
def test_invalid_array_shape(self):
# gh-9832
assert_raises(ValueError, np.random.RandomState, np.array([], dtype=np.int64))
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
[4, 5, 6]])
class TestBinomial(object):
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
# This test addresses issue #3480.
zeros = np.zeros(2, dtype='int')
for p in [0, .5, 1]:
assert_(random.binomial(0, p) == 0)
assert_array_equal(random.binomial(zeros, p), zeros)
def test_p_is_nan(self):
# Issue #4571.
assert_raises(ValueError, random.binomial, 1, np.nan)
class TestMultinomial(object):
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
def test_zero_probability(self):
random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
def test_int_negative_interval(self):
assert_(-5 <= random.randint(-5, -1) < -1)
x = random.randint(-5, -1, 5)
assert_(np.all(-5 <= x))
assert_(np.all(x < -1))
def test_size(self):
# gh-3173
p = [0.5, 0.5]
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
assert_raises(TypeError, np.random.multinomial, 1, p,
float(1))
class TestSetState(object):
def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
self.state = self.prng.get_state()
def test_basic(self):
old = self.prng.tomaxint(16)
self.prng.set_state(self.state)
new = self.prng.tomaxint(16)
assert_(np.all(old == new))
def test_gaussian_reset(self):
# Make sure the cached every-other-Gaussian is reset.
old = self.prng.standard_normal(size=3)
self.prng.set_state(self.state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_gaussian_reset_in_media_res(self):
# When the state is saved with a cached Gaussian, make sure the
# cached Gaussian is restored.
self.prng.standard_normal()
state = self.prng.get_state()
old = self.prng.standard_normal(size=3)
self.prng.set_state(state)
new = self.prng.standard_normal(size=3)
assert_(np.all(old == new))
def test_backwards_compatibility(self):
# Make sure we can accept old state tuples that do not have the
# cached Gaussian value.
old_state = self.state[:-2]
x1 = self.prng.standard_normal(size=16)
self.prng.set_state(old_state)
x2 = self.prng.standard_normal(size=16)
self.prng.set_state(self.state)
x3 = self.prng.standard_normal(size=16)
assert_(np.all(x1 == x2))
assert_(np.all(x1 == x3))
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
self.prng.negative_binomial(0.5, 0.5)
class TestRandint(object):
rfunc = np.random.randint
# valid integer/boolean types
itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
def test_unsupported_type(self):
assert_raises(TypeError, self.rfunc, 1, dtype=float)
def test_bounds_checking(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
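# Note: randint samples the half-open interval [low, high), which is why
# `ubnd` above is set to iinfo(dt).max + 1 -- it keeps the dtype's maximum
# value reachable while `ubnd` itself is never drawn.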
def test_rng_zero_and_extremes(self):
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
tgt = ubnd - 1
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = lbnd
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
tgt = (lbnd + ubnd)//2
assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
def test_full_range(self):
# Test for ticket #1690
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
try:
self.rfunc(lbnd, ubnd, dtype=dt)
except Exception as e:
raise AssertionError("No error should have been raised, "
"but one was with the following "
"message:\n\n%s" % str(e))
def test_in_bounds_fuzz(self):
# Don't use fixed seed
np.random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
assert_(vals.max() < ubnd)
assert_(vals.min() >= 2)
vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
assert_(vals.max() < 2)
assert_(vals.min() >= 0)
def test_repeatability(self):
import hashlib
# We use an md5 hash of generated sequences of 1000 samples
# in the range [0, 6) for all but bool, where the range
# is [0, 2). Hashes are for little-endian numbers.
tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
'int16': '1b7741b80964bb190c50d541dca1cac1',
'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
'int64': '17db902806f448331b5a758d7d2ee672',
'int8': '27dd30c4e08a797063dffac2490b0be6',
'uint16': '1b7741b80964bb190c50d541dca1cac1',
'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
'uint64': '17db902806f448331b5a758d7d2ee672',
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
np.random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
val = self.rfunc(0, 6, size=1000, dtype=dt)
else:
val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
def test_int64_uint64_corner_case(self):
# When stored in Numpy arrays, `lbnd` is casted
# as np.int64, and `ubnd` is casted as np.uint64.
# Checking whether `lbnd` >= `ubnd` used to be
# done solely via direct comparison, which is incorrect
# because when Numpy tries to compare both numbers,
# it casts both to np.float64 because there is
# no integer superset of np.int64 and np.uint64. However,
# `ubnd` is too large to be represented in np.float64,
# causing it be round down to np.iinfo(np.int64).max,
# leading to a ValueError because `lbnd` now equals
# the new `ubnd`.
dt = np.int64
tgt = np.iinfo(np.int64).max
lbnd = np.int64(np.iinfo(np.int64).max)
ubnd = np.uint64(np.iinfo(np.int64).max + 1)
# None of these function calls should
# generate a ValueError now.
actual = np.random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
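# The old failure mode, illustrated (doctest-style; values assumed): after
# the implicit float64 cast the two bounds compare equal, because 2**63 - 1
# rounds up to 2**63 at float64 precision:
#   >>> float(np.int64(2**63 - 1)) == float(np.uint64(2**63))
#   True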
def test_respect_dtype_singleton(self):
# See gh-7203
for dt in self.itype:
lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
for dt in (bool, int, np.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
# gh-7284: Ensure that we get Python data types
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_(not hasattr(sample, 'dtype'))
assert_equal(type(sample), dt)
class TestRandomDist(object):
# Make sure the random distribution returns the correct value for a
# given seed
def setup(self):
self.seed = 1234567890
def test_rand(self):
np.random.seed(self.seed)
actual = np.random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randn(self):
np.random.seed(self.seed)
actual = np.random.randn(3, 2)
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_randint(self):
np.random.seed(self.seed)
actual = np.random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers(self):
np.random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
assert_array_equal(actual, desired)
def test_random_integers_max_int(self):
# Tests whether random_integers can generate the
# maximum allowed Python int that can be converted
# into a C long. Previous implementations of this
# method have thrown an OverflowError when attempting
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
actual = np.random.random_integers(np.iinfo('l').max,
np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
assert_equal(actual, desired)
def test_random_integers_deprecated(self):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
np.random.seed(self.seed)
actual = np.random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_choice_uniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
np.random.seed(self.seed)
actual = np.random.choice(4, 3, replace=False,
p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
np.random.seed(self.seed)
actual = np.random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
sample = np.random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
assert_raises(ValueError, sample, [], 3)
assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
p=[[0.25, 0.25], [0.25, 0.25]])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
assert_(np.isscalar(np.random.choice(2, replace=True)))
assert_(np.isscalar(np.random.choice(2, replace=False)))
assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
assert_(np.random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
assert_(np.random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
assert_(np.random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
assert_equal(np.random.choice(6, s, replace=True).shape, s)
assert_equal(np.random.choice(6, s, replace=False).shape, s)
assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
def test_shuffle(self):
# Test lists, arrays (of various dtypes), and multidimensional versions
# of both, c-contiguous or not:
for conv in [lambda x: np.array([]),
lambda x: x,
lambda x: np.asarray(x).astype(np.int8),
lambda x: np.asarray(x).astype(np.float32),
lambda x: np.asarray(x).astype(np.complex64),
lambda x: np.asarray(x).astype(object),
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
def test_shuffle_masked(self):
# gh-3263
a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
np.random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
np.random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
def test_beta(self):
np.random.seed(self.seed)
actual = np.random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
[1.58405155108498093e-04, 1.26252891949397652e-04]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
np.random.seed(self.seed)
actual = np.random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
np.random.seed(self.seed)
actual = np.random.chisquare(50, size=(3, 2))
desired = np.array([[63.87858175501090585, 68.68407748911370447],
[65.77116116901505904, 47.09686762438974483],
[72.3828403199695174, 74.18408615260374006]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
np.random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
actual = np.random.mtrand.dirichlet(alpha, size=(3, 2))
desired = np.array([[[0.54539444573611562, 0.45460555426388438],
[0.62345816822039413, 0.37654183177960598]],
[[0.55206000085785778, 0.44793999914214233],
[0.58964023305154301, 0.41035976694845688]],
[[0.59266909280647828, 0.40733090719352177],
[0.56974431743975207, 0.43025568256024799]]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, np.uint32(1)).shape, (1, 2))
assert_equal(np.random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
assert_equal(np.random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
assert_raises(TypeError, np.random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
assert_raises(ValueError, np.random.mtrand.dirichlet, alpha)
def test_exponential(self):
np.random.seed(self.seed)
actual = np.random.exponential(1.1234, size=(3, 2))
desired = np.array([[1.08342649775011624, 1.00607889924557314],
[2.46628830085216721, 2.49668106809923884],
[0.68717433461363442, 1.69175666993575979]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
assert_equal(np.random.exponential(scale=0), 0)
assert_raises(ValueError, np.random.exponential, scale=-0.)
def test_f(self):
np.random.seed(self.seed)
actual = np.random.f(12, 77, size=(3, 2))
desired = np.array([[1.21975394418575878, 1.75135759791559775],
[1.44803115017146489, 1.22108959480396262],
[1.02176975757740629, 1.34431827623300415]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
np.random.seed(self.seed)
actual = np.random.gamma(5, 3, size=(3, 2))
desired = np.array([[24.60509188649287182, 28.54993563207210627],
[26.13476110204064184, 12.56988482927716078],
[31.71863275789960568, 33.30143302795922011]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
assert_equal(np.random.gamma(shape=0, scale=0), 0)
assert_raises(ValueError, np.random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
np.random.seed(self.seed)
actual = np.random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
np.random.seed(self.seed)
actual = np.random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
assert_equal(np.random.gumbel(scale=0), 0)
assert_raises(ValueError, np.random.gumbel, scale=-0.)
def test_hypergeometric(self):
np.random.seed(self.seed)
actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
actual = np.random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
actual = np.random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
actual = np.random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
np.random.seed(self.seed)
actual = np.random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
assert_equal(np.random.laplace(scale=0), 0)
assert_raises(ValueError, np.random.laplace, scale=-0.)
def test_logistic(self):
np.random.seed(self.seed)
actual = np.random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
np.random.seed(self.seed)
actual = np.random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
desired = np.array([[16.50698631688883822, 36.54846706092654784],
[22.67886599981281748, 0.71617561058995771],
[65.72798501792723869, 86.84341601437161273]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
assert_equal(np.random.lognormal(sigma=0), 1)
assert_raises(ValueError, np.random.lognormal, sigma=-0.)
def test_logseries(self):
np.random.seed(self.seed)
actual = np.random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
np.random.seed(self.seed)
actual = np.random.multinomial(20, [1/6.]*6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
[2, 1, 4, 3, 6, 4]],
[[4, 4, 2, 5, 2, 3],
[4, 3, 4, 2, 3, 4]]])
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
np.random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
desired = np.array([[[1.463620246718631, 11.73759122771936 ],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
[[0.689515026297799, 9.880729819607714],
[-0.023054015651998, 9.201096623542879]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
actual = np.random.multivariate_normal(mean, cov)
desired = np.array([0.895289569463708, 9.17180864067987])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, np.random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(np.random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
desired = np.array([[848, 841],
[892, 611],
[779, 647]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
desired = np.array([[23.91905354498517511, 13.35324692733826346],
[31.22452661329736401, 16.60047399466177254],
[5.03461598262724586, 17.94973089023519464]])
assert_array_almost_equal(actual, desired, decimal=14)
actual = np.random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
desired = np.array([[1.47145377828516666, 0.15052899268012659],
[0.00943803056963588, 1.02647251615666169],
[0.332334982684171, 0.15451287602753125]])
assert_array_almost_equal(actual, desired, decimal=14)
np.random.seed(self.seed)
actual = np.random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
desired = np.array([[9.597154162763948, 11.725484450296079],
[10.413711048138335, 3.694475922923986],
[13.484222138963087, 14.377255424602957]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
np.random.seed(self.seed)
actual = np.random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
desired = np.array([[1.40598099674926669, 0.34207973179285761],
[3.57715069265772545, 7.92632662577829805],
[0.43741599463544162, 1.1774208752428319]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
np.random.seed(self.seed)
actual = np.random.normal(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[2.80378370443726244, 3.59863924443872163],
[3.121433477601256, -0.33382987590723379],
[4.18552478636557357, 4.46410668111310471]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
assert_equal(np.random.normal(scale=0), 0)
assert_raises(ValueError, np.random.normal, scale=-0.)
def test_pareto(self):
np.random.seed(self.seed)
actual = np.random.pareto(a=.123456789, size=(3, 2))
desired = np.array(
[[2.46852460439034849e+03, 1.41286880810518346e+03],
[5.28287797029485181e+07, 6.57720981047328785e+07],
[1.40840323350391515e+02, 1.98390255135251704e+05]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
# Consensus is that this is probably some gcc quirk that affects
# rounding but not in any important way, so we just use a looser
# tolerance on this test:
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
np.random.seed(self.seed)
actual = np.random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
assert_array_equal(actual, desired)
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
assert_raises(ValueError, np.random.poisson, lamneg)
assert_raises(ValueError, np.random.poisson, [lamneg]*10)
assert_raises(ValueError, np.random.poisson, lambig)
assert_raises(ValueError, np.random.poisson, [lambig]*10)
def test_power(self):
np.random.seed(self.seed)
actual = np.random.power(a=.123456789, size=(3, 2))
desired = np.array([[0.02048932883240791, 0.01424192241128213],
[0.38446073748535298, 0.39499689943484395],
[0.00177699707563439, 0.13115505880863756]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
np.random.seed(self.seed)
actual = np.random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
assert_equal(np.random.rayleigh(scale=0), 0)
assert_raises(ValueError, np.random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
np.random.seed(self.seed)
actual = np.random.standard_cauchy(size=(3, 2))
desired = np.array([[0.77127660196445336, -6.55601161955910605],
[0.93582023391158309, -2.07479293013759447],
[-4.74601644297011926, 0.18338989290760804]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
np.random.seed(self.seed)
actual = np.random.standard_exponential(size=(3, 2))
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_gamma(self):
np.random.seed(self.seed)
actual = np.random.standard_gamma(shape=3, size=(3, 2))
desired = np.array([[5.50841531318455058, 6.62953470301903103],
[5.93988484943779227, 2.31044849402133989],
[7.54838614231317084, 8.012756093271868]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_standard_gamma_0(self):
assert_equal(np.random.standard_gamma(shape=0), 0)
assert_raises(ValueError, np.random.standard_gamma, shape=-0.)
def test_standard_normal(self):
np.random.seed(self.seed)
actual = np.random.standard_normal(size=(3, 2))
desired = np.array([[1.34016345771863121, 1.73759122771936081],
[1.498988344300628, -0.2286433324536169],
[2.031033998682787, 2.17032494605655257]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_t(self):
np.random.seed(self.seed)
actual = np.random.standard_t(df=10, size=(3, 2))
desired = np.array([[0.97140611862659965, -0.08830486548450577],
[1.36311143689505321, -0.55317463909867071],
[-0.18473749069684214, 0.61181537341755321]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
np.random.seed(self.seed)
actual = np.random.triangular(left=5.12, mode=10.23, right=20.34,
size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
np.random.seed(self.seed)
actual = np.random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_uniform_range_bounds(self):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
func = np.random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
assert_raises(OverflowError, func, [-np.inf], [0])
assert_raises(OverflowError, func, [0], [np.inf])
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
np.random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
# when called with objects that throw exceptions when converted to
# scalars.
#
# Regression test for gh: 8865
class ThrowingFloat(np.ndarray):
def __float__(self):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
np.random.seed(self.seed)
actual = np.random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_vonmises_small(self):
# check infinite loop, gh-4720
np.random.seed(self.seed)
r = np.random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
np.testing.assert_(np.isfinite(r).all())
def test_wald(self):
np.random.seed(self.seed)
actual = np.random.wald(mean=1.23, scale=1.54, size=(3, 2))
desired = np.array([[3.82935265715889983, 5.13125249184285526],
[0.35045403618358717, 1.50832396872003538],
[0.24124319895843183, 0.22031101461955038]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
np.random.seed(self.seed)
actual = np.random.weibull(a=1.23, size=(3, 2))
desired = np.array([[0.97097342648766727, 0.91422896443565516],
[1.89517770034962929, 1.91414357960479564],
[0.67057783752390987, 1.39494046635066793]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
assert_equal(np.random.weibull(a=0), 0)
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
np.random.seed(self.seed)
actual = np.random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
assert_array_equal(actual, desired)
class TestBroadcast(object):
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
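    # e.g. np.random.normal(loc=[0., 0., 0.], scale=1.) broadcasts the scalar
    # scale against the length-3 loc and returns a shape (3,) array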
def setup(self):
self.seed = 123456789
def setSeed(self):
np.random.seed(self.seed)
# TODO: Include test for randint once it can broadcast
# Can steal the test written in PR #6938
def test_uniform(self):
low = [0]
high = [1]
uniform = np.random.uniform
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = uniform(low * 3, high)
assert_array_almost_equal(actual, desired, decimal=14)
self.setSeed()
actual = uniform(low, high * 3)
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
loc = [0]
scale = [1]
bad_scale = [-1]
normal = np.random.normal
desired = np.array([2.2129019979039612,
2.1283977976520019,
1.8417114045748335])
self.setSeed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
self.setSeed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
beta = np.random.beta
desired = np.array([0.19843558305989056,
0.075230336409423643,
0.24976865978980844])
self.setSeed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
self.setSeed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a, b * 3)
assert_raises(ValueError, beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
exponential = np.random.exponential
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
std_gamma = np.random.standard_gamma
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
gamma = np.random.gamma
desired = np.array([1.5221370731769048,
1.5277256455738331,
1.4248762625178359])
self.setSeed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
self.setSeed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
f = np.random.f
desired = np.array([0.80038951638264799,
0.86768719635363512,
2.7251095168386801])
self.setSeed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
self.setSeed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
dfden = [3]
nonc = [4]
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
nonc_f = np.random.noncentral_f
desired = np.array([9.1393943263705211,
13.025456344595602,
8.8018098359100545])
self.setSeed()
actual = nonc_f(dfnum * 3, dfden, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
self.setSeed()
actual = nonc_f(dfnum, dfden, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
def test_noncentral_f_small_df(self):
self.setSeed()
desired = np.array([6.869638627492048, 0.785880199263955])
actual = np.random.noncentral_f(0.9, 0.9, 2, size=2)
assert_array_almost_equal(actual, desired, decimal=14)
def test_chisquare(self):
df = [1]
bad_df = [-1]
chisquare = np.random.chisquare
desired = np.array([0.57022801133088286,
0.51947702108840776,
0.1320969254923558])
self.setSeed()
actual = chisquare(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, chisquare, bad_df * 3)
def test_noncentral_chisquare(self):
df = [1]
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
nonc_chi = np.random.noncentral_chisquare
desired = np.array([9.0015599467913763,
4.5804135049718742,
6.0872302432834564])
self.setSeed()
actual = nonc_chi(df * 3, nonc)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
self.setSeed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
t = np.random.standard_t
desired = np.array([3.0702872575217643,
5.8560725167361607,
1.0274791436474273])
self.setSeed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
def test_vonmises(self):
mu = [2]
kappa = [1]
bad_kappa = [-1]
vonmises = np.random.vonmises
desired = np.array([2.9883443664201312,
-2.7064099483995943,
-1.8672476700665914])
self.setSeed()
actual = vonmises(mu * 3, kappa)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
self.setSeed()
actual = vonmises(mu, kappa * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
def test_pareto(self):
a = [1]
bad_a = [-1]
pareto = np.random.pareto
desired = np.array([1.1405622680198362,
1.1465519762044529,
1.0389564467453547])
self.setSeed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
weibull = np.random.weibull
desired = np.array([0.76106853658845242,
0.76386282278691653,
0.71243813125891797])
self.setSeed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
power = np.random.power
desired = np.array([0.53283302478975902,
0.53413660089041659,
0.50955303552646702])
self.setSeed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
def test_laplace(self):
loc = [0]
scale = [1]
bad_scale = [-1]
laplace = np.random.laplace
desired = np.array([0.067921356028507157,
0.070715642226971326,
0.019290950698972624])
self.setSeed()
actual = laplace(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc * 3, bad_scale)
self.setSeed()
actual = laplace(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, laplace, loc, bad_scale * 3)
def test_gumbel(self):
loc = [0]
scale = [1]
bad_scale = [-1]
gumbel = np.random.gumbel
desired = np.array([0.2730318639556768,
0.26936705726291116,
0.33906220393037939])
self.setSeed()
actual = gumbel(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc * 3, bad_scale)
self.setSeed()
actual = gumbel(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gumbel, loc, bad_scale * 3)
def test_logistic(self):
loc = [0]
scale = [1]
bad_scale = [-1]
logistic = np.random.logistic
desired = np.array([0.13152135837586171,
0.13675915696285773,
0.038216792802833396])
self.setSeed()
actual = logistic(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc * 3, bad_scale)
self.setSeed()
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
lognormal = np.random.lognormal
desired = np.array([9.1422086044848427,
8.4013952870126261,
6.3073234116578671])
self.setSeed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
self.setSeed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
bad_scale = [-1]
rayleigh = np.random.rayleigh
desired = np.array([1.2337491937897689,
1.2360119924878694,
1.1936818095781789])
self.setSeed()
actual = rayleigh(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, rayleigh, bad_scale * 3)
def test_wald(self):
mean = [0.5]
scale = [1]
bad_mean = [0]
bad_scale = [-2]
wald = np.random.wald
desired = np.array([0.11873681120271318,
0.12450084820795027,
0.9096122728408238])
self.setSeed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
self.setSeed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
right = [3]
mode = [2]
bad_left_one = [3]
bad_mode_one = [4]
bad_left_two, bad_mode_two = right * 2
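        # right * 2 is [3, 3]; unpacking yields two scalar 3s, equal to the
        # right endpoint and therefore invalid as left/mode in the calls below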
triangular = np.random.triangular
desired = np.array([2.03339048710429,
2.0347400359389356,
2.0095991069536208])
self.setSeed()
actual = triangular(left * 3, mode, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
def test_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
binom = np.random.binomial
desired = np.array([1, 1, 1])
self.setSeed()
actual = binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n * 3, p)
assert_raises(ValueError, binom, n * 3, bad_p_one)
assert_raises(ValueError, binom, n * 3, bad_p_two)
self.setSeed()
actual = binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, binom, bad_n, p * 3)
assert_raises(ValueError, binom, n, bad_p_one * 3)
assert_raises(ValueError, binom, n, bad_p_two * 3)
def test_negative_binomial(self):
n = [1]
p = [0.5]
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
neg_binom = np.random.negative_binomial
desired = np.array([1, 0, 1])
self.setSeed()
actual = neg_binom(n * 3, p)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
self.setSeed()
actual = neg_binom(n, p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
max_lam = np.random.RandomState().poisson_lam_max
lam = [1]
bad_lam_one = [-1]
bad_lam_two = [max_lam * 2]
poisson = np.random.poisson
desired = np.array([1, 1, 0])
self.setSeed()
actual = poisson(lam * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, poisson, bad_lam_one * 3)
assert_raises(ValueError, poisson, bad_lam_two * 3)
def test_zipf(self):
a = [2]
bad_a = [0]
zipf = np.random.zipf
desired = np.array([2, 2, 1])
self.setSeed()
actual = zipf(a * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, zipf, bad_a * 3)
with np.errstate(invalid='ignore'):
assert_raises(ValueError, zipf, np.nan)
assert_raises(ValueError, zipf, [0, 0, np.nan])
def test_geometric(self):
p = [0.5]
bad_p_one = [-1]
bad_p_two = [1.5]
geom = np.random.geometric
desired = np.array([2, 2, 2])
self.setSeed()
actual = geom(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, geom, bad_p_one * 3)
assert_raises(ValueError, geom, bad_p_two * 3)
def test_hypergeometric(self):
ngood = [1]
nbad = [2]
nsample = [2]
bad_ngood = [-1]
bad_nbad = [-2]
bad_nsample_one = [0]
bad_nsample_two = [4]
hypergeom = np.random.hypergeometric
desired = np.array([1, 1, 1])
self.setSeed()
actual = hypergeom(ngood * 3, nbad, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad * 3, nsample)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
self.setSeed()
actual = hypergeom(ngood, nbad, nsample * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
bad_p_two = [-1]
logseries = np.random.logseries
desired = np.array([1, 1, 1])
self.setSeed()
actual = logseries(p * 3)
assert_array_equal(actual, desired)
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
class TestThread(object):
# make sure each state produces the same sequence even in threads
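    # e.g. RandomState(0).normal(size=4) must produce identical values whether
    # it runs in a worker thread or in the main thread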
def setup(self):
self.seeds = range(4)
def check_function(self, function, sz):
from threading import Thread
out1 = np.empty((len(self.seeds),) + sz)
out2 = np.empty((len(self.seeds),) + sz)
# threaded generation
t = [Thread(target=function, args=(np.random.RandomState(s), o))
for s, o in zip(self.seeds, out1)]
[x.start() for x in t]
[x.join() for x in t]
# the same serial
for s, o in zip(self.seeds, out2):
function(np.random.RandomState(s), o)
# these platforms change x87 fpu precision mode in threads
if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
assert_array_almost_equal(out1, out2)
else:
assert_array_equal(out1, out2)
def test_normal(self):
def gen_random(state, out):
out[...] = state.normal(size=10000)
self.check_function(gen_random, sz=(10000,))
def test_exp(self):
def gen_random(state, out):
out[...] = state.exponential(scale=np.ones((100, 1000)))
self.check_function(gen_random, sz=(100, 1000))
def test_multinomial(self):
def gen_random(state, out):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
self.argThree = np.array([4])
self.tgtShape = (1,)
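    # e.g. np.random.exponential(np.array([2])) should return an array of
    # shape (1,), not a scalar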
def test_one_arg_funcs(self):
funcs = (np.random.exponential, np.random.standard_gamma,
np.random.chisquare, np.random.standard_t,
np.random.pareto, np.random.weibull,
np.random.power, np.random.rayleigh,
np.random.poisson, np.random.zipf,
np.random.geometric, np.random.logseries)
probfuncs = (np.random.geometric, np.random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(self.argOne)
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
funcs = (np.random.uniform, np.random.normal,
np.random.beta, np.random.gamma,
np.random.f, np.random.noncentral_chisquare,
np.random.vonmises, np.random.laplace,
np.random.gumbel, np.random.logistic,
np.random.lognormal, np.random.wald,
np.random.binomial, np.random.negative_binomial)
probfuncs = (np.random.binomial, np.random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = self.argTwo
out = func(self.argOne, argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], argTwo)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
# TODO: Uncomment once randint can broadcast arguments
# def test_randint(self):
# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
# np.int32, np.uint32, np.int64, np.uint64]
# func = np.random.randint
# high = np.array([1])
# low = np.array([0])
#
# for dt in itype:
# out = func(low, high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low[0], high, dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
#
# out = func(low, high[0], dtype=dt)
# self.assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
funcs = [np.random.noncentral_f, np.random.triangular,
np.random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne[0], self.argTwo, self.argThree)
assert_equal(out.shape, self.tgtShape)
out = func(self.argOne, self.argTwo[0], self.argThree)
assert_equal(out.shape, self.tgtShape)
if __name__ == "__main__":
run_module_suite()
| |
import json
import os
from mock import patch
from tests import FlexGetBase, MockManager
from flexget import __version__
from flexget.manager import Manager
from flexget.api import app, API_VERSION
from flexget.webserver import User
from flexget.utils.database import with_session
@with_session
def api_key(session=None):
user = session.query(User).first()
if not user:
user = User(name='flexget', password='flexget')
session.add(user)
session.commit()
return user.token
def append_header(key, value, kwargs):
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers'][key] = value
class APITest(FlexGetBase):
def __init__(self):
self.client = app.test_client()
FlexGetBase.__init__(self)
def json_post(self, *args, **kwargs):
append_header('Content-Type', 'application/json', kwargs)
        # pop 'auth' so it is not forwarded to the test client
        if kwargs.pop('auth', True):
append_header('Authorization', 'Token %s' % api_key(), kwargs)
return self.client.post(*args, **kwargs)
def get(self, *args, **kwargs):
        if kwargs.pop('auth', True):
append_header('Authorization', 'Token %s' % api_key(), kwargs)
return self.client.get(*args, **kwargs)
def delete(self, *args, **kwargs):
        if kwargs.pop('auth', True):
append_header('Authorization', 'Token %s' % api_key(), kwargs)
return self.client.delete(*args, **kwargs)
class TestServerAPI(APITest):
__yaml__ = """
tasks:
test:
rss:
url: http://test/rss
mock:
- title: entry 1
"""
def test_pid(self):
rsp = self.get('/server/pid/', headers={})
assert rsp.status_code == 200
assert json.loads(rsp.data) == {'pid': os.getpid()}
@patch.object(MockManager, 'load_config')
def test_reload(self, mocked_load_config):
rsp = self.get('/server/reload/')
assert rsp.status_code == 200
assert mocked_load_config.called
@patch.object(Manager, 'shutdown')
def test_shutdown(self, mocked_shutdown):
self.get('/server/shutdown/')
assert mocked_shutdown.called
def test_get_config(self):
rsp = self.get('/server/config/')
assert rsp.status_code == 200
assert json.loads(rsp.data) == {
'tasks': {
'test': {
'mock': [{'title': 'entry 1'}],
'rss': {'url': 'http://test/rss'}
}
}
}
def test_version(self):
rsp = self.get('/server/version/')
assert rsp.status_code == 200
assert json.loads(rsp.data) == {'flexget_version': __version__, 'api_version': API_VERSION}
class TestTaskAPI(APITest):
__yaml__ = """
tasks:
test:
rss:
url: http://test/rss
mock:
- title: entry 1
"""
def test_list_tasks(self):
rsp = self.get('/tasks/')
data = json.loads(rsp.data)
assert data == {
'tasks': [
{
'name': 'test',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {'url': 'http://test/rss'}
},
}
]
}
@patch.object(Manager, 'save_config')
def test_add_task(self, mocked_save_config):
new_task = {
'name': 'new_task',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {'url': 'http://test/rss'}
}
}
rsp = self.json_post('/tasks/', data=json.dumps(new_task))
assert rsp.status_code == 201
assert mocked_save_config.called
assert json.loads(rsp.data) == new_task
assert self.manager.user_config['tasks']['new_task'] == new_task['config']
# With defaults
new_task['config']['rss']['ascii'] = False
new_task['config']['rss']['group_links'] = False
new_task['config']['rss']['silent'] = False
new_task['config']['rss']['all_entries'] = True
assert self.manager.config['tasks']['new_task'] == new_task['config']
def test_add_task_existing(self):
new_task = {
'name': 'test',
'config': {
'mock': [{'title': 'entry 1'}]
}
}
rsp = self.json_post('/tasks/', data=json.dumps(new_task))
assert rsp.status_code == 409
def test_get_task(self):
rsp = self.get('/tasks/test/')
data = json.loads(rsp.data)
assert data == {
'name': 'test',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {'url': 'http://test/rss'}
},
}
@patch.object(Manager, 'save_config')
def test_update_task(self, mocked_save_config):
updated_task = {
'name': 'test',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {'url': 'http://newurl/rss'}
}
}
rsp = self.json_post('/tasks/test/', data=json.dumps(updated_task))
assert rsp.status_code == 200
assert mocked_save_config.called
assert json.loads(rsp.data) == updated_task
assert self.manager.user_config['tasks']['test'] == updated_task['config']
# With defaults
updated_task['config']['rss']['ascii'] = False
updated_task['config']['rss']['group_links'] = False
updated_task['config']['rss']['silent'] = False
updated_task['config']['rss']['all_entries'] = True
assert self.manager.config['tasks']['test'] == updated_task['config']
@patch.object(Manager, 'save_config')
def test_rename_task(self, mocked_save_config):
updated_task = {
'name': 'new_test',
'config': {
'mock': [{'title': 'entry 1'}],
'rss': {'url': 'http://newurl/rss'}
}
}
rsp = self.json_post('/tasks/test/', data=json.dumps(updated_task))
assert rsp.status_code == 201
assert mocked_save_config.called
assert json.loads(rsp.data) == updated_task
assert 'test' not in self.manager.user_config['tasks']
assert 'test' not in self.manager.config['tasks']
assert self.manager.user_config['tasks']['new_test'] == updated_task['config']
# With defaults
updated_task['config']['rss']['ascii'] = False
updated_task['config']['rss']['group_links'] = False
updated_task['config']['rss']['silent'] = False
updated_task['config']['rss']['all_entries'] = True
assert self.manager.config['tasks']['new_test'] == updated_task['config']
@patch.object(Manager, 'save_config')
def test_delete_task(self, mocked_save_config):
rsp = self.delete('/tasks/test/')
assert rsp.status_code == 200
assert mocked_save_config.called
assert 'test' not in self.manager.user_config['tasks']
assert 'test' not in self.manager.config['tasks']
# TODO: Finish tests
| |
# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
from oslo.config import cfg
from nova import exception
from nova.network import model as network_model
from nova import test
from nova.tests import matchers
from nova.tests import utils
from nova.tests.virt.vmwareapi import test_vm_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
CONF = cfg.CONF
class VMwareVifTestCase(test.NoDBTestCase):
def setUp(self):
super(VMwareVifTestCase, self).setUp()
self.flags(vlan_interface='vmnet0', group='vmware')
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True)
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
self.session = test_vm_util.fake_session()
self.cluster = None
def tearDown(self):
super(VMwareVifTestCase, self).tearDown()
def test_ensure_vlan_bridge(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
self.cluster)
network_util.get_network_with_the_name(self.session, 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)
    # FlatDHCP network mode without vlan - the network doesn't exist on the host
def test_ensure_vlan_bridge_without_vlan(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(None)
network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
self.cluster).AndReturn('vmnet0')
network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
self.cluster).AndReturn(True)
network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
self.cluster)
network_util.get_network_with_the_name(self.session, 'fa0', None)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
    # FlatDHCP network mode without vlan - the network already exists on the
    # host, so get_vswitch_for_vlan_interface and
    # check_if_vlan_interface_exists should not be called
def test_ensure_vlan_bridge_with_network(self):
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
vm_network = {'name': 'VM Network', 'type': 'Network'}
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(vm_network)
self.mox.ReplayAll()
vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)
# Flat network mode with DVS
def test_ensure_vlan_bridge_with_existing_dvs(self):
network_ref = {'dvpg': 'dvportgroup-2062',
'type': 'DistributedVirtualPortgroup'}
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
self.mox.StubOutWithMock(network_util,
'get_vswitch_for_vlan_interface')
self.mox.StubOutWithMock(network_util,
'check_if_vlan_interface_exists')
self.mox.StubOutWithMock(network_util, 'create_port_group')
network_util.get_network_with_the_name(self.session, 'fa0',
self.cluster).AndReturn(network_ref)
self.mox.ReplayAll()
ref = vif.ensure_vlan_bridge(self.session,
self.vif,
create_vlan=False)
self.assertThat(ref, matchers.DictMatches(network_ref))
def test_get_network_ref_neutron(self):
self.mox.StubOutWithMock(vif, 'get_neutron_network')
vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
self.mox.ReplayAll()
vif.get_network_ref(self.session, self.cluster, self.vif, True)
def test_get_network_ref_flat_dhcp(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=False)
self.mox.ReplayAll()
vif.get_network_ref(self.session, self.cluster, self.vif, False)
def test_get_network_ref_bridge(self):
self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
create_vlan=True)
self.mox.ReplayAll()
network = network_model.Network(id=0,
bridge='fa0',
label='fake',
vlan=3,
bridge_interface='eth0',
injected=True,
should_create_vlan=True)
self.vif = network_model.NetworkInfo([
network_model.VIF(id=None,
address='DE:AD:BE:EF:00:00',
network=network,
type=None,
devname=None,
ovs_interfaceid=None,
rxtx_cap=3)
])[0]
vif.get_network_ref(self.session, self.cluster, self.vif, False)
    def test_get_network_ref_from_opaque_bridge(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id',
'opaqueNetworkName': 'name',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertEqual('bridge_id', network_ref['network-id'])
def test_get_network_ref_bridges(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
'opaqueNetworkName': 'name1',
'opaqueNetworkType': 'OpaqueNetwork'},
{'opaqueNetworkId': 'bridge_id2',
'opaqueNetworkName': 'name2',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id2')
self.assertEqual('bridge_id2', network_ref['network-id'])
def test_get_network_ref_integration(self):
opaque_networks = [{'opaqueNetworkId': 'integration_bridge',
'opaqueNetworkName': 'name',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertEqual('integration_bridge', network_ref['network-id'])
def test_get_network_ref_bridge_none(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
'opaqueNetworkName': 'name1',
'opaqueNetworkType': 'OpaqueNetwork'},
{'opaqueNetworkId': 'bridge_id2',
'opaqueNetworkName': 'name2',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertIsNone(network_ref)
def test_get_network_ref_integration_multiple(self):
opaque_networks = [{'opaqueNetworkId': 'bridge_id1',
'opaqueNetworkName': 'name1',
'opaqueNetworkType': 'OpaqueNetwork'},
{'opaqueNetworkId': 'integration_bridge',
'opaqueNetworkName': 'name2',
'opaqueNetworkType': 'OpaqueNetwork'}]
network_ref = vif._get_network_ref_from_opaque(opaque_networks,
'integration_bridge', 'bridge_id')
self.assertIsNone(network_ref)
def test_get_neutron_network(self):
self.mox.StubOutWithMock(vm_util, 'get_host_ref')
self.mox.StubOutWithMock(self.session, '_call_method')
self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
vm_util.get_host_ref(self.session,
self.cluster).AndReturn('fake-host')
opaque = fake.DataObject()
opaque.HostOpaqueNetworkInfo = ['fake-network-info']
self.session._call_method(vim_util, "get_dynamic_property",
'fake-host', 'HostSystem',
'config.network.opaqueNetwork').AndReturn(opaque)
vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
CONF.vmware.integration_bridge,
self.vif['network']['id']).AndReturn('fake-network-ref')
self.mox.ReplayAll()
network_ref = vif.get_neutron_network(self.session,
self.vif['network']['id'],
self.cluster,
self.vif)
self.assertEqual(network_ref, 'fake-network-ref')
def test_get_neutron_network_opaque_network_not_found(self):
self.mox.StubOutWithMock(vm_util, 'get_host_ref')
self.mox.StubOutWithMock(self.session, '_call_method')
self.mox.StubOutWithMock(vif, '_get_network_ref_from_opaque')
vm_util.get_host_ref(self.session,
self.cluster).AndReturn('fake-host')
opaque = fake.DataObject()
opaque.HostOpaqueNetworkInfo = ['fake-network-info']
self.session._call_method(vim_util, "get_dynamic_property",
'fake-host', 'HostSystem',
'config.network.opaqueNetwork').AndReturn(opaque)
vif._get_network_ref_from_opaque(opaque.HostOpaqueNetworkInfo,
CONF.vmware.integration_bridge,
self.vif['network']['id']).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.NetworkNotFoundForBridge,
vif.get_neutron_network, self.session,
self.vif['network']['id'], self.cluster, self.vif)
def test_get_neutron_network_bridge_network_not_found(self):
self.mox.StubOutWithMock(vm_util, 'get_host_ref')
self.mox.StubOutWithMock(self.session, '_call_method')
self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
vm_util.get_host_ref(self.session,
self.cluster).AndReturn('fake-host')
opaque = fake.DataObject()
opaque.HostOpaqueNetworkInfo = ['fake-network-info']
self.session._call_method(vim_util, "get_dynamic_property",
'fake-host', 'HostSystem',
'config.network.opaqueNetwork').AndReturn(None)
network_util.get_network_with_the_name(self.session, 0,
self.cluster).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(exception.NetworkNotFoundForBridge,
vif.get_neutron_network, self.session,
self.vif['network']['id'], self.cluster, self.vif)
def test_create_port_group_already_exists(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'AddPortGroup':
raise error_util.AlreadyExistsException()
with contextlib.nested(
mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
mock.patch.object(vm_util, 'get_host_ref'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_add_vswitch, _get_host, _call_method):
network_util.create_port_group(self.session, 'pg_name',
'vswitch_name', vlan_id=0,
cluster=None)
def test_create_port_group_exception(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'AddPortGroup':
raise error_util.VMwareDriverException()
with contextlib.nested(
mock.patch.object(vm_util, 'get_add_vswitch_port_group_spec'),
mock.patch.object(vm_util, 'get_host_ref'),
mock.patch.object(self.session, '_call_method',
fake_call_method)
) as (_add_vswitch, _get_host, _call_method):
self.assertRaises(error_util.VMwareDriverException,
network_util.create_port_group,
self.session, 'pg_name',
'vswitch_name', vlan_id=0,
cluster=None)
def test_get_neutron_network_invalid_property(self):
def fake_call_method(module, method, *args, **kwargs):
if method == 'get_dynamic_property':
raise error_util.InvalidPropertyException()
with contextlib.nested(
mock.patch.object(vm_util, 'get_host_ref'),
mock.patch.object(self.session, '_call_method',
fake_call_method),
mock.patch.object(network_util, 'get_network_with_the_name')
) as (_get_host, _call_method, _get_name):
vif.get_neutron_network(self.session, 'network_name',
'cluster', 'vif')
def test_get_vif_info_none(self):
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'is_neutron', 'fake_model', None)
self.assertEqual([], vif_info)
@mock.patch.object(vif, 'get_network_ref', return_value='fake_ref')
def test_get_vif_info(self, mock_get_network_ref):
network_info = utils.get_test_network_info()
vif_info = vif.get_vif_info('fake_session', 'fake_cluster',
'is_neutron', 'fake_model', network_info)
expected = [{'iface_id': 'vif-xxx-yyy-zzz',
'mac_address': 'fake',
'network_name': 'fake',
'network_ref': 'fake_ref',
'vif_model': 'fake_model'}]
self.assertEqual(expected, vif_info)
| |
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" test_template.py """
import json
import logging
import os
import time
import urllib
import signal
import subprocess
from collections import namedtuple
from ..common import status
# Test input. Put each value on its own line, terminated with \n; otherwise the
# lines passed into the topology will be incorrect, and the test will fail.
TEST_INPUT = ["1\n", "2\n", "3\n", "4\n", "5\n", "6\n", "7\n", "8\n",
"9\n", "10\n", "11\n", "12\n"]
# Retry variables in case the output is different from the input
RETRY_COUNT = 5
RETRY_INTERVAL = 10
# Topology shard definitions
NON_TMASTER_SHARD = 1
# Topology process name definitions
STMGR = 'stmgr'
HERON_BIN = "bin"
HERON_CORE = "heron-core"
HERON_METRICSMGR = 'metricsmgr'
HERON_SANDBOX_HOME = "."
HERON_STMGR = "heron-stmgr"
HERON_STMGR_CMD = os.path.join(HERON_SANDBOX_HOME, HERON_CORE, HERON_BIN, HERON_STMGR)
ProcessTuple = namedtuple('ProcessTuple', 'pid cmd')
class TestTemplate(object):
""" Class that encapsulates the template used for integration tests. Intended to be abstract and
subclassed for specific tests. """
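  # A minimal hypothetical subclass (illustrative only, not part of this
  # module) overrides the hooks and lets run_test() drive the whole
  # submit/verify/cleanup cycle:
  #
  #   class KillStmgrExample(TestTemplate):
  #     def execute_test_case(self):
  #       self.kill_strmgr()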
def __init__(self, testname, params):
self.testname = testname
self.params = params
# pylint: disable=too-many-return-statements, too-many-branches,
# pylint: disable=too-many-statements
def run_test(self):
""" Runs the test template. Must either return TestSuccess or raise TestFailure"""
topology_submitted = False
try:
# prepare test data, start the topology and block until it's running
self._prepare_test_data()
self.submit_topology()
topology_submitted = True
_block_until_stmgr_running(self.get_expected_container_count())
self._block_until_topology_running(self.get_expected_min_instance_count())
# Execute the specific test logic and block until topology is running again
self.execute_test_case()
_block_until_stmgr_running(self.get_expected_container_count())
physical_plan_json =\
self._block_until_topology_running(self.get_expected_min_instance_count())
# trigger the test data to flow and invoke the pre_check_results hook
self._inject_test_data()
self.pre_check_results(physical_plan_json)
# finally verify the expected results
result = self._check_results()
return result
except status.TestFailure as e:
raise e
except Exception as e:
raise status.TestFailure("Exception thrown during test", e)
finally:
if topology_submitted:
self.cleanup_test()
def submit_topology(self):
_submit_topology(
self.params['cliPath'],
self.params['cluster'],
self.params['testJarPath'],
self.params['topologyClassPath'],
self.params['topologyName'],
self.params['readFile'],
self.params['outputFile']
)
# pylint: disable=no-self-use
def get_expected_container_count(self):
return 1
# pylint: disable=no-self-use
def get_expected_min_instance_count(self):
return 1
def execute_test_case(self):
pass
# pylint: disable=no-self-use,unused-argument
def pre_check_results(self, physical_plan_json):
return True
def cleanup_test(self):
try:
_kill_topology(self.params['cliPath'], self.params['cluster'], self.params['topologyName'])
except Exception as e:
logging.error("Failed to kill %s topology: %s", self.params['topologyName'], str(e))
finally:
self._delete_test_data_files()
def _delete_test_data_files(self):
_safe_delete_file(self.params['readFile'])
_safe_delete_file(self.params['outputFile'])
def _prepare_test_data(self):
self._delete_test_data_files()
# insert lines into temp file and then move to read file
try:
with open('temp.txt', 'w') as f:
for line in TEST_INPUT:
f.write(line)
except Exception as e:
logging.error("Failed to write to temp.txt file: %s", str(e))
return False
def _inject_test_data(self):
    # Move the temp file onto the read file. The atomic rename guarantees the
    # spout sees the complete contents at once, which makes the test more
    # deterministic.
os.rename('temp.txt', self.params['readFile'])
def _check_results(self):
""" get actual and expected result.
retry if results are not equal a predesignated amount of times
"""
expected_result = ""
actual_result = ""
retries_left = RETRY_COUNT
_sleep("before trying to check results for test %s" % self.testname, RETRY_INTERVAL)
while retries_left > 0:
retries_left -= 1
try:
with open(self.params['readFile'], 'r') as f:
expected_result = f.read()
with open(self.params['outputFile'], 'r') as g:
actual_result = g.read()
except Exception as e:
        message = ("Failed to read expected or actual results from file "
                   "for test %s: %s" % (self.testname, e))
        if retries_left == 0:
          raise status.TestFailure(message, e)
        logging.error(message)
# if we get expected result, no need to retry
expected_sorted = sorted(expected_result.split('\n'))
actual_sorted = sorted(actual_result.split('\n'))
if expected_sorted == actual_sorted:
break
if retries_left > 0:
expected_result = ""
actual_result = ""
expected_sorted = []
actual_sorted = []
logging.info("Failed to get expected results for test %s (attempt %s/%s), "\
+ "retrying after %s seconds",
self.testname, RETRY_COUNT - retries_left, RETRY_COUNT, RETRY_INTERVAL)
time.sleep(RETRY_INTERVAL)
# Compare the actual and expected result
if actual_sorted == expected_sorted:
success = status.TestSuccess(
"Actual result matched expected result for test %s" % self.testname)
logging.info("Actual result ---------- \n%s", actual_sorted)
logging.info("Expected result ---------- \n%s", expected_sorted)
return success
else:
failure = status.TestFailure(
"Actual result did not match expected result for test %s" % self.testname)
logging.info("Actual result ---------- \n%s", actual_sorted)
logging.info("Expected result ---------- \n%s", expected_sorted)
raise failure
# pylint: disable=no-self-use
def get_pid(self, process_name, heron_working_directory):
"""
    Opens the .pid file of the process and reads the first and only line,
    which should be the process pid. Returns -1 on failure.
"""
process_pid_file = os.path.join(heron_working_directory, process_name + '.pid')
try:
with open(process_pid_file, 'r') as f:
pid = f.readline()
return pid
except Exception:
logging.error("Unable to open file %s", process_pid_file)
return -1
# pylint: disable=no-self-use
def kill_process(self, process_number):
""" kills process by running unix command kill """
if process_number < 1:
raise RuntimeError(
"Not attempting to kill process id < 1 passed to kill_process: %d" % process_number)
logging.info("Killing process number %s", process_number)
try:
os.kill(int(process_number), signal.SIGTERM)
except OSError as ex:
if "No such process" in str(ex): # killing a non-existing process condsidered as success
logging.info(str(ex))
else:
raise RuntimeError("Unable to kill process %s" % process_number)
except Exception:
raise RuntimeError("Unable to kill process %s" % process_number)
logging.info("Killed process number %s", process_number)
def kill_strmgr(self):
logging.info("Executing kill stream manager")
stmgr_pid = self.get_pid('%s-%d' % (STMGR, NON_TMASTER_SHARD), self.params['workingDirectory'])
self.kill_process(stmgr_pid)
def kill_metricsmgr(self):
logging.info("Executing kill metrics manager")
metricsmgr_pid = self.get_pid(
'%s-%d' % (HERON_METRICSMGR, NON_TMASTER_SHARD), self.params['workingDirectory'])
self.kill_process(metricsmgr_pid)
def _get_tracker_pplan(self):
url = 'http://localhost:%s/topologies/physicalplan?' % self.params['trackerPort']\
+ 'cluster=local&environ=default&topology=IntegrationTest_LocalReadWriteTopology'
logging.debug("Fetching physical plan from %s", url)
response = urllib.urlopen(url)
physical_plan_json = json.loads(response.read())
if 'result' not in physical_plan_json:
raise status.TestFailure(
"Could not find result json in physical plan request to tracker: %s" % url)
return physical_plan_json['result']
def _block_until_topology_running(self, min_instances):
retries_left = RETRY_COUNT
_sleep("before trying to fetch pplan for test %s" % self.testname, RETRY_INTERVAL)
while retries_left > 0:
retries_left -= 1
packing_plan = self._get_tracker_pplan()
if packing_plan:
instances_found = len(packing_plan['instances'])
if instances_found >= min_instances:
logging.info("Successfully fetched pplan from tracker for test %s after %s attempts.",
self.testname, RETRY_COUNT - retries_left)
return packing_plan
elif retries_left == 0:
raise status.TestFailure(
"Got pplan from tracker for test %s but the number of " % self.testname +
"instances found (%d) was less than min expected (%s)." %
(instances_found, min_instances))
if retries_left > 0:
_sleep("before trying again to fetch pplan for test %s (attempt %s/%s)" %
(self.testname, RETRY_COUNT - retries_left, RETRY_COUNT), RETRY_INTERVAL)
else:
raise status.TestFailure("Failed to get pplan from tracker for test %s after %s attempts."
% (self.testname, RETRY_COUNT))
def _block_until_stmgr_running(expected_stmgrs):
# block until ./heron-stmgr exists
process_list = _get_processes()
while not _processes_exists(process_list, HERON_STMGR_CMD, expected_stmgrs):
process_list = _get_processes()
time.sleep(1)
def _submit_topology(heron_cli_path, test_cluster, test_jar_path, topology_class_path,
topology_name, input_file, output_file):
""" Submit topology using heron-cli """
  # unicode strings mess up subprocess.call argument quoting, so convert them to str
splitcmd = [
heron_cli_path, 'submit', '--verbose', '--', test_cluster, test_jar_path,
topology_class_path, topology_name, input_file, output_file, str(len(TEST_INPUT))
]
logging.info("Submitting topology: %s", splitcmd)
p = subprocess.Popen(splitcmd)
p.wait()
if p.returncode != 0:
raise status.TestFailure("Failed to submit topology %s" % topology_name)
logging.info("Submitted topology %s", topology_name)
def _kill_topology(heron_cli_path, test_cluster, topology_name):
""" Kill a topology using heron-cli """
splitcmd = [heron_cli_path, 'kill', test_cluster, topology_name]
logging.info("Killing topology: %s", ' '.join(splitcmd))
# this call can be blocking, no need for subprocess
if subprocess.call(splitcmd) != 0:
raise RuntimeError("Unable to kill the topology: %s" % topology_name)
def _get_processes():
"""
  Returns a list of process tuples (pid, cmd).
  This applies only to the local scheduler, as it uses the ps command and
  assumes the topology runs in separate processes on the same machine.
"""
# pylint: disable=fixme
# TODO: if the submit fails before we get here (e.g., Topology already exists), this hangs
processes = subprocess.check_output(['ps', '-o', 'pid,args'])
processes = processes.split('\n')
processes = processes[1:] # remove first line, which is name of columns
process_list = []
for process in processes:
# remove empty lines
if process == '':
continue
pretuple = process.split(' ', 1)
process_list.append(ProcessTuple(pretuple[0], pretuple[1]))
return process_list
def _sleep(message, seconds):
logging.info("Sleeping for %d seconds %s", seconds, message)
time.sleep(seconds)
def _processes_exists(process_list, process_cmd, min_processes):
""" check if a process is running """
  process_count = 0
  for process in process_list:
    if process_cmd in process.cmd:
      process_count += 1
  return process_count >= min_processes
def _safe_delete_file(file_name):
  if os.path.isfile(file_name):
try:
os.remove(file_name)
except Exception as e:
logging.error("Failed to delete file: %s: %s", file_name, e)
return False
| |
from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from . import libsvm, liblinear
from . import libsvm_sparse
from ..base import BaseEstimator, ClassifierMixin, ChangedBehaviorWarning
from ..preprocessing import LabelEncoder
from ..multiclass import _ovr_decision_function
from ..utils import check_array, check_random_state, column_or_1d
from ..utils import ConvergenceWarning, compute_class_weight, deprecated
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted, NotFittedError
from ..externals import six
LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
    for the one-vs-one multiclass LibSVM in the case
    of a linear kernel."""
# get 1vs1 weights for all n*(n-1) classifiers.
# this is somewhat messy.
# shape of dual_coef_ is nSV * (n_classes -1)
# see docs for details
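    # e.g. with n_classes = 3 and n_support = [2, 1, 2], sv_locs is
    # [0, 2, 3, 5], dual_coef has shape (2, 5), and the loop below yields the
    # pairwise coefficients for (0, 1), (0, 2) and (1, 2) in that order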
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1]:sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
# SVs for class1:
sv2 = support_vectors[sv_locs[class2]:sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1]:sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1)
+ safe_sparse_dot(alpha2, sv2))
return coef
class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for estimators that use libsvm as backing library
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0,
tol, C, nu, epsilon, shrinking, probability, cache_size,
class_weight, verbose, max_iter, random_state):
if impl not in LIBSVM_IMPL: # pragma: no cover
raise ValueError("impl should be one of %s, %s was given" % (
LIBSVM_IMPL, impl))
# FIXME Remove gamma=0.0 support in 0.18
if gamma == 0:
msg = ("gamma=%s has been deprecated in favor of "
"gamma='%s' as of 0.17. Backward compatibility"
" for gamma=%s will be removed in %s")
invalid_gamma = 0.0
warnings.warn(msg % (invalid_gamma, "auto", invalid_gamma, "0.18"),
DeprecationWarning)
self._impl = impl
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
@property
def _pairwise(self):
# Used by cross_val_score.
kernel = self.kernel
return kernel == "precomputed" or callable(kernel)
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like, shape (n_samples,)
Target values (class labels in classification, real numbers in
regression)
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
        -----
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.isspmatrix(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
X = check_array(X, accept_sparse='csr', dtype=np.float64, order='C')
y = self._validate_targets(y)
sample_weight = np.asarray([]
if sample_weight is None
else sample_weight, dtype=np.float64)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
if solver_type != 2 and X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"X has %s samples, but y has %s." %
(X.shape[0], y.shape[0]))
if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]:
raise ValueError("sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape))
# FIXME remove (self.gamma == 0) in 0.18
if (self.kernel in ['poly', 'rbf']) and ((self.gamma == 0) or
(self.gamma == 'auto')):
# if custom gamma is not provided ...
self._gamma = 1.0 / X.shape[1]
elif self.gamma == 'auto':
self._gamma = 0.0
else:
self._gamma = self.gamma
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose: # pragma: no cover
print('[LibSVM]', end='')
seed = rnd.randint(np.iinfo('i').max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_ internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
# XXX this is ugly.
# Regression models should not have a class_weight_ attribute.
self.class_weight_ = np.empty(0)
return column_or_1d(y, warn=True).astype(np.float64)
def _warn_from_fit_status(self):
assert self.fit_status_ in (0, 1)
if self.fit_status_ == 1:
warnings.warn('Solver terminated early (max_iter=%i).'
' Consider pre-processing your data with'
' StandardScaler or MinMaxScaler.'
% self.max_iter, ConvergenceWarning)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
self.support_, self.support_vectors_, self.n_support_, \
self.dual_coef_, self.intercept_, self.probA_, \
self.probB_, self.fit_status_ = libsvm.fit(
X, y,
svm_type=solver_type, sample_weight=sample_weight,
class_weight=self.class_weight_, kernel=kernel, C=self.C,
nu=self.nu, probability=self.probability, degree=self.degree,
shrinking=self.shrinking, tol=self.tol,
cache_size=self.cache_size, coef0=self.coef0,
gamma=self._gamma, epsilon=self.epsilon,
max_iter=self.max_iter, random_seed=random_seed)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
self.support_, self.support_vectors_, dual_coef_data, \
self.intercept_, self.n_support_, \
self.probA_, self.probB_, self.fit_status_ = \
libsvm_sparse.libsvm_sparse_train(
X.shape[1], X.data, X.indices, X.indptr, y, solver_type,
kernel_type, self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
sample_weight, self.nu, self.cache_size, self.epsilon,
int(self.shrinking), int(self.probability), self.max_iter,
random_seed)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
        dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
                                     dual_coef_indices.size // n_class)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr),
(n_class, n_SV))
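        # Layout sketch: for a 3-class model with 4 support vectors, n_class
        # is 2, dual_coef_indices is [0, 1, 2, 3, 0, 1, 2, 3] and
        # dual_coef_indptr is [0, 4, 8], giving a (2, 4) CSR matrix with one
        # row of coefficients per class boundary.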
def predict(self, X):
"""Perform regression on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : array, shape (n_samples,)
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
n_samples, n_features = X.shape
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order='C')
kernel = self.kernel
if callable(self.kernel):
kernel = 'precomputed'
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_, svm_type=svm_type, kernel=kernel,
degree=self.degree, coef0=self.coef0, gamma=self._gamma,
cache_size=self.cache_size)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
            # in the case of a kernel given as a callable, we have to
            # compute the kernel matrix explicitly
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order='C')
return X
@deprecated(" and will be removed in 0.19")
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train].
Returns
-------
        dec : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        dec : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
return libsvm.decision_function(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel, degree=self.degree, cache_size=self.cache_size,
coef0=self.coef0, gamma=self._gamma)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
        if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _validate_for_predict(self, X):
check_is_fitted(self, 'support_')
X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
if self._sparse and not sp.isspmatrix(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__)
n_samples, n_features = X.shape
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
elif n_features != self.shape_fit_[1]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time" %
(n_features, self.shape_fit_[1]))
return X
@property
def coef_(self):
if self.kernel != 'linear':
raise ValueError('coef_ is only available when using a '
'linear kernel')
coef = self._get_coef()
# coef_ being a read-only property, it's better to mark the value as
# immutable to avoid hiding potential bugs for the unsuspecting user.
if sp.issparse(coef):
            # sparse matrices do not have global flags
coef.data.flags.writeable = False
else:
# regular dense array
coef.flags.writeable = False
return coef
def _get_coef(self):
return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
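# Minimal illustration (standalone sketch, not part of the original API): for
# a linear kernel the primal weights are recoverable from the dual form as
# w = dual_coef @ support_vectors, which is what _get_coef computes above via
# safe_sparse_dot.
def _demo_linear_coef_reconstruction():
    import numpy as np
    dual_coef = np.array([[0.5, -0.25, -0.25]])     # signed alphas, (1, n_SV)
    support_vectors = np.array([[1.0, 0.0],
                                [0.0, 1.0],
                                [2.0, 2.0]])        # (n_SV, n_features)
    return dual_coef.dot(support_vectors)           # array([[0.0, -0.75]])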
class BaseSVC(six.with_metaclass(ABCMeta, BaseLibSVM, ClassifierMixin)):
"""ABC for LibSVM-based classifiers."""
@abstractmethod
def __init__(self, impl, kernel, degree, gamma, coef0, tol, C, nu,
shrinking, probability, cache_size, class_weight, verbose,
max_iter, decision_function_shape, random_state):
self.decision_function_shape = decision_function_shape
super(BaseSVC, self).__init__(
impl=impl, kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
random_state=random_state)
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d"
% len(cls))
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order='C')
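        # e.g. for y == ['b', 'a', 'b'] this yields cls == ['a', 'b'] and
        # y == [1, 0, 1]: classes_ ends up sorted, and the encoded targets
        # are handed to libsvm as a float64 array.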
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        dec : array-like, shape (n_samples, n_classes * (n_classes-1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes)
"""
dec = self._decision_function(X)
if self.decision_function_shape is None and len(self.classes_) > 2:
warnings.warn("The decision_function_shape default value will "
"change from 'ovo' to 'ovr' in 0.18. This will change "
"the shape of the decision function returned by "
"SVC.", ChangedBehaviorWarning)
if self.decision_function_shape == 'ovr':
return _ovr_decision_function(dec < 0, dec, len(self.classes_))
return dec
def predict(self, X):
"""Perform classification on samples in X.
        For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
y_pred : array, shape (n_samples,)
Class labels for samples in X.
"""
y = super(BaseSVC, self).predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
raise AttributeError("predict_proba is not available when "
" probability=False")
if self._impl not in ('c_svc', 'nu_svc'):
raise AttributeError("predict_proba only implemented for SVC"
" and NuSVC")
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
X = self._validate_for_predict(X)
if self.probA_.size == 0 or self.probB_.size == 0:
raise NotFittedError("predict_proba is not available when fitted "
"with probability=False")
pred_proba = (self._sparse_predict_proba
if self._sparse else self._dense_predict_proba)
return pred_proba(X)
@property
def predict_log_proba(self):
"""Compute log probabilities of possible outcomes for samples in X.
        The model needs to have probability information computed at training
time: fit with attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=svm_type, kernel=kernel, degree=self.degree,
cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _get_coef(self):
if self.dual_coef_.shape[0] == 1:
# binary classifier
coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
else:
# 1vs1 classifier
coef = _one_vs_one_coef(self.dual_coef_, self.n_support_,
self.support_vectors_)
if sp.issparse(coef[0]):
coef = sp.vstack(coef).tocsr()
else:
coef = np.vstack(coef)
return coef
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
"""
    # Nested dicts containing
    # level 1: available loss functions,
    # level 2: available penalties for the given loss function,
    # level 3: whether the dual solver is available for the specified
    #          combination of loss function and penalty
_solver_type_dict = {
'logistic_regression': {
'l1': {False: 6},
'l2': {False: 0, True: 7}},
'hinge': {
'l2': {True: 3}},
'squared_hinge': {
'l1': {False: 5},
'l2': {False: 2, True: 1}},
'epsilon_insensitive': {
'l2': {True: 13}},
'squared_epsilon_insensitive': {
'l2': {False: 11, True: 12}},
'crammer_singer': 4
}
if multi_class == 'crammer_singer':
return _solver_type_dict[multi_class]
elif multi_class != 'ovr':
raise ValueError("`multi_class` must be one of `ovr`, "
"`crammer_singer`, got %r" % multi_class)
# FIXME loss.lower() --> loss in 0.18
_solver_pen = _solver_type_dict.get(loss.lower(), None)
if _solver_pen is None:
error_string = ("loss='%s' is not supported" % loss)
else:
        # FIXME penalty.lower() --> penalty in 0.18
_solver_dual = _solver_pen.get(penalty.lower(), None)
if _solver_dual is None:
error_string = ("The combination of penalty='%s' "
"and loss='%s' is not supported"
% (penalty, loss))
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = ("The combination of penalty='%s' and "
"loss='%s' are not supported when dual=%s"
% (penalty, loss, dual))
else:
return solver_num
raise ValueError('Unsupported set of arguments: %s, '
'Parameters: penalty=%r, loss=%r, dual=%r'
% (error_string, penalty, loss, dual))
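# Hedged usage sketch: these calls only exercise the lookup table above, so
# the expected magic numbers can be read off _solver_type_dict directly.
def _demo_liblinear_solver_numbers():
    assert _get_liblinear_solver_type('ovr', 'l2', 'squared_hinge', True) == 1
    assert _get_liblinear_solver_type('ovr', 'l2', 'logistic_regression', False) == 0
    # crammer_singer short-circuits: penalty/loss/dual are ignored
    assert _get_liblinear_solver_type('crammer_singer', 'l2', 'hinge', True) == 4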
def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
random_state=None, multi_class='ovr',
loss='logistic_regression', epsilon=0.1):
"""Used by Logistic Regression (and CV) and LinearSVC.
Preprocessing is done in this function before supplying it to liblinear.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X
C : float
        Inverse of regularization strength; the lower the C, the stronger
        the penalization.
fit_intercept : bool
        Whether or not to fit the intercept, that is, to add an intercept
        term to the decision function.
intercept_scaling : float
        LibLinear internally penalizes the intercept, and this term is
        subject to regularization just like the other terms of the feature
        vector. To lessen this effect, one can increase intercept_scaling,
        such that the feature vector becomes [x, intercept_scaling].
class_weight : {dict, 'balanced'}, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
penalty : str, {'l1', 'l2'}
The norm of the penalty used in regularization.
dual : bool
        Dual or primal formulation.
verbose : int
Set verbose to any positive number for verbosity.
max_iter : int
        Maximum number of iterations.
tol : float
Stopping condition.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
multi_class : str, {'ovr', 'crammer_singer'}
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        because it is consistent, it is seldom used in practice: it rarely
        leads to better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual
        will be ignored.
loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
                 'epsilon_insensitive', 'squared_epsilon_insensitive'}
The loss function used to fit the model.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
Returns
-------
    coef_ : ndarray, shape (n_classes, n_features)
        The coefficient vector obtained by minimizing the objective function.
intercept_ : float
The intercept term added to the vector.
n_iter_ : int
Maximum number of iterations run across all classes.
"""
# FIXME Remove case insensitivity in 0.18 ---------------------
loss_l, penalty_l = loss.lower(), penalty.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
if (not loss.islower()) and loss_l not in ('l1', 'l2'):
warnings.warn(msg % (loss, loss_l, "0.18"),
DeprecationWarning)
if not penalty.islower():
warnings.warn(msg.replace("loss", "penalty")
% (penalty, penalty_l, "0.18"),
DeprecationWarning)
# -------------------------------------------------------------
# FIXME loss_l --> loss in 0.18
if loss_l not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
enc = LabelEncoder()
y_ind = enc.fit_transform(y)
classes_ = enc.classes_
if len(classes_) < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
class_weight_ = compute_class_weight(class_weight, classes_, y)
else:
        class_weight_ = np.empty(0, dtype=np.float64)
y_ind = y
liblinear.set_verbosity_wrap(verbose)
rnd = check_random_state(random_state)
if verbose:
print('[LibLinear]', end='')
# LinearSVC breaks when intercept_scaling is <= 0
bias = -1.0
if fit_intercept:
if intercept_scaling <= 0:
raise ValueError("Intercept scaling is %r but needs to be greater than 0."
" To disable fitting an intercept,"
" set fit_intercept=False." % intercept_scaling)
else:
bias = intercept_scaling
libsvm.set_verbosity_wrap(verbose)
libsvm_sparse.set_verbosity_wrap(verbose)
liblinear.set_verbosity_wrap(verbose)
# LibLinear wants targets as doubles, even for classification
y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
raw_coef_, n_iter_ = liblinear.train_wrap(
X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
epsilon)
# Regarding rnd.randint(..) in the above signature:
# seed for srand in range [0..INT_MAX); due to limitations in Numpy
# on 32-bit platforms, we can't get to the UINT_MAX limit that
# srand supports
n_iter_ = max(n_iter_)
if n_iter_ >= max_iter and verbose > 0:
warnings.warn("Liblinear failed to converge, increase "
"the number of iterations.", ConvergenceWarning)
if fit_intercept:
coef_ = raw_coef_[:, :-1]
intercept_ = intercept_scaling * raw_coef_[:, -1]
else:
coef_ = raw_coef_
intercept_ = 0.
return coef_, intercept_, n_iter_
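# Hedged usage sketch (assumes the compiled liblinear extension imported at
# the top of this module is available, i.e. a built scikit-learn tree); the
# toy data below is illustrative only.
def _demo_fit_liblinear():
    import numpy as np
    X = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
    y = np.array([0, 0, 1, 1])
    coef_, intercept_, n_iter_ = _fit_liblinear(
        X, y, C=1.0, fit_intercept=True, intercept_scaling=1.0,
        class_weight=None, penalty='l2', dual=True, verbose=0,
        max_iter=1000, tol=1e-4, loss='squared_hinge')
    return coef_, intercept_, n_iter_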
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from typing import Optional, List, Dict, Sequence, Set
from enum import IntEnum
import copy
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QStandardItemModel, QStandardItem, QFont
from PyQt5.QtWidgets import QAbstractItemView, QMenu, QLabel, QHBoxLayout
from electrum_ltc.i18n import _
from electrum_ltc.transaction import PartialTxInput
from .util import MyTreeView, ColorScheme, MONOSPACE_FONT, EnterButton
class UTXOList(MyTreeView):
_spend_set: Optional[Set[str]] # coins selected by the user to spend from
_utxo_dict: Dict[str, PartialTxInput] # coin name -> coin
class Columns(IntEnum):
OUTPOINT = 0
ADDRESS = 1
LABEL = 2
AMOUNT = 3
HEIGHT = 4
headers = {
Columns.ADDRESS: _('Address'),
Columns.LABEL: _('Label'),
Columns.AMOUNT: _('Amount'),
Columns.HEIGHT: _('Height'),
Columns.OUTPOINT: _('Output point'),
}
filter_columns = [Columns.ADDRESS, Columns.LABEL, Columns.OUTPOINT]
stretch_column = Columns.LABEL
def __init__(self, parent):
super().__init__(parent, self.create_menu,
stretch_column=self.stretch_column,
editable_columns=[])
self._spend_set = None
self._utxo_dict = {}
self.wallet = self.parent.wallet
self.setModel(QStandardItemModel(self))
self.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.setSortingEnabled(True)
self.update()
def update(self):
# not calling maybe_defer_update() as it interferes with coincontrol status bar
utxos = self.wallet.get_utxos()
self._maybe_reset_spend_list(utxos)
self._utxo_dict = {}
self.model().clear()
self.update_headers(self.__class__.headers)
for idx, utxo in enumerate(utxos):
self.insert_utxo(idx, utxo)
self.filter()
# update coincontrol status bar
if self._spend_set is not None:
coins = [self._utxo_dict[x] for x in self._spend_set]
coins = self._filter_frozen_coins(coins)
amount = sum(x.value_sats() for x in coins)
amount_str = self.parent.format_amount_and_units(amount)
num_outputs_str = _("{} outputs available ({} total)").format(len(coins), len(utxos))
self.parent.set_coincontrol_msg(_("Coin control active") + f': {num_outputs_str}, {amount_str}')
else:
self.parent.set_coincontrol_msg(None)
def insert_utxo(self, idx, utxo: PartialTxInput):
address = utxo.address
height = utxo.block_height
name = utxo.prevout.to_str()
name_short = utxo.prevout.txid.hex()[:16] + '...' + ":%d" % utxo.prevout.out_idx
self._utxo_dict[name] = utxo
label = self.wallet.get_label(utxo.prevout.txid.hex())
amount = self.parent.format_amount(utxo.value_sats(), whitespaces=True)
labels = [name_short, address, label, amount, '%d'%height]
utxo_item = [QStandardItem(x) for x in labels]
self.set_editability(utxo_item)
utxo_item[self.Columns.OUTPOINT].setData(name, self.ROLE_CLIPBOARD_DATA)
utxo_item[self.Columns.ADDRESS].setFont(QFont(MONOSPACE_FONT))
utxo_item[self.Columns.AMOUNT].setFont(QFont(MONOSPACE_FONT))
utxo_item[self.Columns.OUTPOINT].setFont(QFont(MONOSPACE_FONT))
utxo_item[self.Columns.ADDRESS].setData(name, Qt.UserRole)
SELECTED_TO_SPEND_TOOLTIP = _('Coin selected to be spent')
if name in (self._spend_set or set()):
for col in utxo_item:
col.setBackground(ColorScheme.GREEN.as_color(True))
if col != self.Columns.OUTPOINT:
col.setToolTip(SELECTED_TO_SPEND_TOOLTIP)
if self.wallet.is_frozen_address(address):
utxo_item[self.Columns.ADDRESS].setBackground(ColorScheme.BLUE.as_color(True))
utxo_item[self.Columns.ADDRESS].setToolTip(_('Address is frozen'))
if self.wallet.is_frozen_coin(utxo):
utxo_item[self.Columns.OUTPOINT].setBackground(ColorScheme.BLUE.as_color(True))
utxo_item[self.Columns.OUTPOINT].setToolTip(f"{name}\n{_('Coin is frozen')}")
else:
tooltip = ("\n" + SELECTED_TO_SPEND_TOOLTIP) if name in (self._spend_set or set()) else ""
utxo_item[self.Columns.OUTPOINT].setToolTip(name + tooltip)
self.model().insertRow(idx, utxo_item)
def get_selected_outpoints(self) -> Optional[List[str]]:
if not self.model():
return None
items = self.selected_in_column(self.Columns.ADDRESS)
return [x.data(Qt.UserRole) for x in items]
def _filter_frozen_coins(self, coins: List[PartialTxInput]) -> List[PartialTxInput]:
coins = [utxo for utxo in coins
if (not self.wallet.is_frozen_address(utxo.address) and
not self.wallet.is_frozen_coin(utxo))]
return coins
def set_spend_list(self, coins: Optional[List[PartialTxInput]]):
if coins is not None:
coins = self._filter_frozen_coins(coins)
self._spend_set = {utxo.prevout.to_str() for utxo in coins}
else:
self._spend_set = None
self.update()
def get_spend_list(self) -> Optional[Sequence[PartialTxInput]]:
if self._spend_set is None:
return None
utxos = [self._utxo_dict[x] for x in self._spend_set]
return copy.deepcopy(utxos) # copy so that side-effects don't affect utxo_dict
def _maybe_reset_spend_list(self, current_wallet_utxos: Sequence[PartialTxInput]) -> None:
if self._spend_set is None:
return
# if we spent one of the selected UTXOs, just reset selection
utxo_set = {utxo.prevout.to_str() for utxo in current_wallet_utxos}
        if not all(prevout_str in utxo_set for prevout_str in self._spend_set):
self._spend_set = None
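            # e.g. if the user selected {"aaaa...:0", "bbbb...:1"} and
            # "aaaa...:0" was spent meanwhile, it is absent from utxo_set, so
            # the whole selection is cleared rather than spending a subset.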
def create_menu(self, position):
selected = self.get_selected_outpoints()
if selected is None:
return
menu = QMenu()
menu.setSeparatorsCollapsible(True) # consecutive separators are merged together
coins = [self._utxo_dict[name] for name in selected]
if len(coins) == 0:
menu.addAction(_("Spend (select none)"), lambda: self.set_spend_list(coins))
else:
menu.addAction(_("Spend"), lambda: self.set_spend_list(coins))
if len(coins) == 1:
utxo = coins[0]
addr = utxo.address
txid = utxo.prevout.txid.hex()
# "Details"
tx = self.wallet.db.get_transaction(txid)
if tx:
label = self.wallet.get_label(txid) or None # Prefer None if empty (None hides the Description: field in the window)
menu.addAction(_("Details"), lambda: self.parent.show_transaction(tx, tx_desc=label))
# "Copy ..."
idx = self.indexAt(position)
if not idx.isValid():
return
self.add_copy_menu(menu, idx)
# "Freeze coin"
if not self.wallet.is_frozen_coin(utxo):
menu.addAction(_("Freeze Coin"), lambda: self.parent.set_frozen_state_of_coins([utxo], True))
else:
menu.addSeparator()
menu.addAction(_("Coin is frozen"), lambda: None).setEnabled(False)
menu.addAction(_("Unfreeze Coin"), lambda: self.parent.set_frozen_state_of_coins([utxo], False))
menu.addSeparator()
# "Freeze address"
if not self.wallet.is_frozen_address(addr):
menu.addAction(_("Freeze Address"), lambda: self.parent.set_frozen_state_of_addresses([addr], True))
else:
menu.addSeparator()
menu.addAction(_("Address is frozen"), lambda: None).setEnabled(False)
menu.addAction(_("Unfreeze Address"), lambda: self.parent.set_frozen_state_of_addresses([addr], False))
menu.addSeparator()
elif len(coins) > 1: # multiple items selected
menu.addSeparator()
addrs = [utxo.address for utxo in coins]
is_coin_frozen = [self.wallet.is_frozen_coin(utxo) for utxo in coins]
is_addr_frozen = [self.wallet.is_frozen_address(utxo.address) for utxo in coins]
if not all(is_coin_frozen):
menu.addAction(_("Freeze Coins"), lambda: self.parent.set_frozen_state_of_coins(coins, True))
if any(is_coin_frozen):
menu.addAction(_("Unfreeze Coins"), lambda: self.parent.set_frozen_state_of_coins(coins, False))
if not all(is_addr_frozen):
menu.addAction(_("Freeze Addresses"), lambda: self.parent.set_frozen_state_of_addresses(addrs, True))
if any(is_addr_frozen):
menu.addAction(_("Unfreeze Addresses"), lambda: self.parent.set_frozen_state_of_addresses(addrs, False))
menu.exec_(self.viewport().mapToGlobal(position))
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'SystemMessageSeverityEnum' : _MetaInfoEnum('SystemMessageSeverityEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper',
{
'message-severity-unknown':'message_severity_unknown',
'message-severity-emergency':'message_severity_emergency',
'message-severity-alert':'message_severity_alert',
'message-severity-critical':'message_severity_critical',
'message-severity-error':'message_severity_error',
'message-severity-warning':'message_severity_warning',
'message-severity-notice':'message_severity_notice',
'message-severity-informational':'message_severity_informational',
'message-severity-debug':'message_severity_debug',
}, 'Cisco-IOS-XR-infra-syslog-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper']),
'Logging.History' : {
'meta_info' : _MetaInfoClass('Logging.History',
False,
[
_MetaInfoClassMember('message', ATTRIBUTE, 'str' , None, None,
[], [],
''' Syslog Message
''',
'message',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('properties', ATTRIBUTE, 'str' , None, None,
[], [],
''' Syslog Properties
''',
'properties',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Logging' : {
'meta_info' : _MetaInfoClass('Logging',
False,
[
_MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Logging.History',
[], [],
''' Syslog Info
''',
'history',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'logging',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingFiles.FileLogDetail' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingFiles.FileLogDetail',
False,
[
_MetaInfoClassMember('file-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' File name for logging messages
''',
'file_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('file-path', ATTRIBUTE, 'str' , None, None,
[], [],
''' File path for logging messages
''',
'file_path',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'file-log-detail',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingFiles' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingFiles',
False,
[
_MetaInfoClassMember('file-log-detail', REFERENCE_LIST, 'FileLogDetail' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingFiles.FileLogDetail',
[], [],
''' Logging Files
''',
'file_log_detail',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'logging-files',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.AnRemoteServers.AnRemoteLogServer' : {
'meta_info' : _MetaInfoClass('Syslog.AnRemoteServers.AnRemoteLogServer',
False,
[
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP Address
''',
'ip_address',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('rh-discriminator', ATTRIBUTE, 'str' , None, None,
[], [],
''' Remote-Host Discriminator
''',
'rh_discriminator',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF Name
''',
'vrf_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('vrf-severity', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF Severity
''',
'vrf_severity',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'an-remote-log-server',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.AnRemoteServers' : {
'meta_info' : _MetaInfoClass('Syslog.AnRemoteServers',
False,
[
_MetaInfoClassMember('an-remote-log-server', REFERENCE_LIST, 'AnRemoteLogServer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.AnRemoteServers.AnRemoteLogServer',
[], [],
''' AN Remote Log Servers
''',
'an_remote_log_server',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'an-remote-servers',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.Messages.Message' : {
'meta_info' : _MetaInfoClass('Syslog.Messages.Message',
False,
[
_MetaInfoClassMember('message-id', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Message ID of the system message
''',
'message_id',
'Cisco-IOS-XR-infra-syslog-oper', True),
_MetaInfoClassMember('card-type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Message card location: 'RP', 'DRP', 'LC', 'SC',
'SP' or 'UNK'
''',
'card_type',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('category', ATTRIBUTE, 'str' , None, None,
[], [],
''' Message category
''',
'category',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Message group
''',
'group',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('message-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Message name
''',
'message_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], [b'([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Message source location
''',
'node_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('process-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Process name
''',
'process_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'SystemMessageSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'SystemMessageSeverityEnum',
[], [],
''' Message severity
''',
'severity',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('text', ATTRIBUTE, 'str' , None, None,
[], [],
''' Additional message text
''',
'text',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('time-of-day', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Time of day of event in DDD MMM DD YYYY HH:MM:SS
                format, e.g. Wed Apr 01 2009 15:50:26
''',
'time_of_day',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('time-stamp', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
                ''' Time in milliseconds since 00:00:00 UTC, January 1
                1970, of when the message was generated
''',
'time_stamp',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('time-zone', ATTRIBUTE, 'str' , None, None,
[], [],
                ''' Time Zone in UTC+/-HH:MM format, e.g. UTC+5:30,
UTC-6
''',
'time_zone',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'message',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.Messages' : {
'meta_info' : _MetaInfoClass('Syslog.Messages',
False,
[
_MetaInfoClassMember('message', REFERENCE_LIST, 'Message' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.Messages.Message',
[], [],
''' A system message
''',
'message',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'messages',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingStatistics.LoggingStats' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingStatistics.LoggingStats',
False,
[
_MetaInfoClassMember('drop-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of messages dropped
''',
'drop_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('flush-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of messages flushed
''',
'flush_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('is-log-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is log enabled
''',
'is_log_enabled',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('overrun-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of messages overrun
''',
'overrun_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'logging-stats',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingStatistics.ConsoleLoggingStats' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingStatistics.ConsoleLoggingStats',
False,
[
_MetaInfoClassMember('buffer-size', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
                ''' Buffer size in bytes if logging buffer is enabled
''',
'buffer_size',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('is-log-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is log enabled
''',
'is_log_enabled',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('message-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message count
''',
'message_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'SystemMessageSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'SystemMessageSeverityEnum',
[], [],
''' Configured severity
''',
'severity',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'console-logging-stats',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingStatistics.MonitorLoggingStats' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingStatistics.MonitorLoggingStats',
False,
[
_MetaInfoClassMember('buffer-size', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
                ''' Buffer size in bytes if logging buffer is enabled
''',
'buffer_size',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('is-log-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is log enabled
''',
'is_log_enabled',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('message-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message count
''',
'message_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'SystemMessageSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'SystemMessageSeverityEnum',
[], [],
''' Configured severity
''',
'severity',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'monitor-logging-stats',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingStatistics.TrapLoggingStats' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingStatistics.TrapLoggingStats',
False,
[
_MetaInfoClassMember('buffer-size', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
                ''' Buffer size in bytes if logging buffer is enabled
''',
'buffer_size',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('is-log-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is log enabled
''',
'is_log_enabled',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('message-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message count
''',
'message_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'SystemMessageSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'SystemMessageSeverityEnum',
[], [],
''' Configured severity
''',
'severity',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'trap-logging-stats',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingStatistics.BufferLoggingStats' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingStatistics.BufferLoggingStats',
False,
[
_MetaInfoClassMember('buffer-size', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
                ''' Buffer size in bytes if logging buffer is enabled
''',
'buffer_size',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('is-log-enabled', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Is log enabled
''',
'is_log_enabled',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('message-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message count
''',
'message_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'SystemMessageSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'SystemMessageSeverityEnum',
[], [],
''' Configured severity
''',
'severity',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'buffer-logging-stats',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingStatistics.RemoteLoggingStat' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingStatistics.RemoteLoggingStat',
False,
[
_MetaInfoClassMember('message-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message count
''',
'message_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('remote-host-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Remote hostname
''',
'remote_host_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'remote-logging-stat',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingStatistics.TlsRemoteLoggingStat' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingStatistics.TlsRemoteLoggingStat',
False,
[
_MetaInfoClassMember('message-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message count
''',
'message_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('remote-host-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' TLS Remote hostname
''',
'remote_host_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'tls-remote-logging-stat',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingStatistics.FileLoggingStat' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingStatistics.FileLoggingStat',
False,
[
_MetaInfoClassMember('file-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' File name for logging messages
''',
'file_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('message-count', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Message count
''',
'message_count',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'file-logging-stat',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.LoggingStatistics' : {
'meta_info' : _MetaInfoClass('Syslog.LoggingStatistics',
False,
[
_MetaInfoClassMember('buffer-logging-stats', REFERENCE_CLASS, 'BufferLoggingStats' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingStatistics.BufferLoggingStats',
[], [],
''' Buffer logging statistics
''',
'buffer_logging_stats',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('console-logging-stats', REFERENCE_CLASS, 'ConsoleLoggingStats' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingStatistics.ConsoleLoggingStats',
[], [],
''' Console logging statistics
''',
'console_logging_stats',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('file-logging-stat', REFERENCE_LIST, 'FileLoggingStat' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingStatistics.FileLoggingStat',
[], [],
''' File logging statistics
''',
'file_logging_stat',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('logging-stats', REFERENCE_CLASS, 'LoggingStats' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingStatistics.LoggingStats',
[], [],
''' Logging statistics
''',
'logging_stats',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('monitor-logging-stats', REFERENCE_CLASS, 'MonitorLoggingStats' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingStatistics.MonitorLoggingStats',
[], [],
                ''' Monitor logging statistics
''',
'monitor_logging_stats',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('remote-logging-stat', REFERENCE_LIST, 'RemoteLoggingStat' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingStatistics.RemoteLoggingStat',
[], [],
''' Remote logging statistics
''',
'remote_logging_stat',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('tls-remote-logging-stat', REFERENCE_LIST, 'TlsRemoteLoggingStat' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingStatistics.TlsRemoteLoggingStat',
[], [],
''' TLS Remote logging statistics
''',
'tls_remote_logging_stat',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('trap-logging-stats', REFERENCE_CLASS, 'TrapLoggingStats' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingStatistics.TrapLoggingStats',
[], [],
''' Trap logging statistics
''',
'trap_logging_stats',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'logging-statistics',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog' : {
'meta_info' : _MetaInfoClass('Syslog',
False,
[
_MetaInfoClassMember('an-remote-servers', REFERENCE_CLASS, 'AnRemoteServers' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.AnRemoteServers',
[], [],
''' Logging AN remote servers information
''',
'an_remote_servers',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('logging-files', REFERENCE_CLASS, 'LoggingFiles' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingFiles',
[], [],
''' Logging Files information
''',
'logging_files',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('logging-statistics', REFERENCE_CLASS, 'LoggingStatistics' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.LoggingStatistics',
[], [],
''' Logging statistics information
''',
'logging_statistics',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('messages', REFERENCE_CLASS, 'Messages' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.Messages',
[], [],
''' Message table information
''',
'messages',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'syslog',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
}
_meta_table['Logging.History']['meta_info'].parent = _meta_table['Logging']['meta_info']
_meta_table['Syslog.LoggingFiles.FileLogDetail']['meta_info'].parent = _meta_table['Syslog.LoggingFiles']['meta_info']
_meta_table['Syslog.AnRemoteServers.AnRemoteLogServer']['meta_info'].parent = _meta_table['Syslog.AnRemoteServers']['meta_info']
_meta_table['Syslog.Messages.Message']['meta_info'].parent = _meta_table['Syslog.Messages']['meta_info']
_meta_table['Syslog.LoggingStatistics.LoggingStats']['meta_info'].parent = _meta_table['Syslog.LoggingStatistics']['meta_info']
_meta_table['Syslog.LoggingStatistics.ConsoleLoggingStats']['meta_info'].parent = _meta_table['Syslog.LoggingStatistics']['meta_info']
_meta_table['Syslog.LoggingStatistics.MonitorLoggingStats']['meta_info'].parent = _meta_table['Syslog.LoggingStatistics']['meta_info']
_meta_table['Syslog.LoggingStatistics.TrapLoggingStats']['meta_info'].parent = _meta_table['Syslog.LoggingStatistics']['meta_info']
_meta_table['Syslog.LoggingStatistics.BufferLoggingStats']['meta_info'].parent = _meta_table['Syslog.LoggingStatistics']['meta_info']
_meta_table['Syslog.LoggingStatistics.RemoteLoggingStat']['meta_info'].parent = _meta_table['Syslog.LoggingStatistics']['meta_info']
_meta_table['Syslog.LoggingStatistics.TlsRemoteLoggingStat']['meta_info'].parent = _meta_table['Syslog.LoggingStatistics']['meta_info']
_meta_table['Syslog.LoggingStatistics.FileLoggingStat']['meta_info'].parent = _meta_table['Syslog.LoggingStatistics']['meta_info']
_meta_table['Syslog.LoggingFiles']['meta_info'].parent = _meta_table['Syslog']['meta_info']
_meta_table['Syslog.AnRemoteServers']['meta_info'].parent = _meta_table['Syslog']['meta_info']
_meta_table['Syslog.Messages']['meta_info'].parent = _meta_table['Syslog']['meta_info']
_meta_table['Syslog.LoggingStatistics']['meta_info'].parent = _meta_table['Syslog']['meta_info']
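# Hedged usage sketch (assumes ydk-py 0.x, where _MetaInfoClass keeps its
# member descriptors on a `meta_info_class_members` attribute; both names
# below are taken from the generated table above):
def _demo_meta_lookup():
    info = _meta_table['Syslog.Messages.Message']['meta_info']
    return [member.name for member in info.meta_info_class_members]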
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from django.template import Context, loader
from django.utils import datetime_safe, six
from haystack.exceptions import SearchFieldError
from haystack.utils import get_model_ct_tuple
class NOT_PROVIDED:
pass
DATETIME_REGEX = re.compile(
    r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)'
    r'(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the SearchFields variants.
class SearchField(object):
"""The base implementation of a search field."""
field_type = None
def __init__(self, model_attr=None, use_template=False, template_name=None,
document=False, indexed=True, stored=True, faceted=False,
default=NOT_PROVIDED, null=False, index_fieldname=None,
facet_class=None, boost=1.0, weight=None):
# Track what the index thinks this field is called.
self.instance_name = None
self.model_attr = model_attr
self.use_template = use_template
self.template_name = template_name
self.document = document
self.indexed = indexed
self.stored = stored
self.faceted = faceted
self._default = default
self.null = null
self.index_fieldname = index_fieldname
self.boost = weight or boost
self.is_multivalued = False
# We supply the facet_class for making it easy to create a faceted
# field based off of this field.
self.facet_class = facet_class
if self.facet_class is None:
self.facet_class = FacetCharField
self.set_instance_name(None)
def set_instance_name(self, instance_name):
self.instance_name = instance_name
if self.index_fieldname is None:
self.index_fieldname = self.instance_name
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
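    # e.g. SearchField(default=list).default returns a fresh [] on every
    # access, while SearchField(default=0).default is simply 0.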
def prepare(self, obj):
"""
Takes data from the provided object and prepares it for storage in the
index.
"""
# Give priority to a template.
if self.use_template:
return self.prepare_template(obj)
elif self.model_attr is not None:
# Check for `__` in the field for looking through the relation.
attrs = self.model_attr.split('__')
current_object = obj
for attr in attrs:
if not hasattr(current_object, attr):
raise SearchFieldError("The model '%s' does not have a model_attr '%s'." % (repr(current_object), attr))
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
                        # Fall out of the loop, since any further attribute
                        # accesses would fail.
break
elif self.null:
current_object = None
                        # Fall out of the loop, since any further attribute
                        # accesses would fail.
break
else:
raise SearchFieldError("The model '%s' combined with model_attr '%s' returned None, but doesn't allow a default or null value." % (repr(obj), self.model_attr))
if callable(current_object):
return current_object()
return current_object
if self.has_default():
return self.default
else:
return None
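    # e.g. model_attr='author__profile__name' walks obj.author.profile.name;
    # a missing hop raises SearchFieldError, while a hop resolving to None
    # falls back to the default (or None when null=True).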
def prepare_template(self, obj):
"""
Flattens an object for indexing.
This loads a template
(``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and
returns the result of rendering that template. ``object`` will be in
its context.
"""
if self.instance_name is None and self.template_name is None:
raise SearchFieldError("This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.")
if self.template_name is not None:
template_names = self.template_name
if not isinstance(template_names, (list, tuple)):
template_names = [template_names]
else:
app_label, model_name = get_model_ct_tuple(obj)
template_names = ['search/indexes/%s/%s_%s.txt' % (app_label, model_name, self.instance_name)]
t = loader.select_template(template_names)
return t.render(Context({'object': obj}))
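    # e.g. for a field named 'text' on a hypothetical blog.Post model, the
    # template looked up is 'search/indexes/blog/post_text.txt', rendered
    # with {'object': obj} in the context.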
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
class CharField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetCharField
super(CharField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(CharField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return six.text_type(value)
class LocationField(SearchField):
field_type = 'location'
def prepare(self, obj):
from haystack.utils.geo import ensure_point
value = super(LocationField, self).prepare(obj)
if value is None:
return None
pnt = ensure_point(value)
pnt_lng, pnt_lat = pnt.get_coords()
return "%s,%s" % (pnt_lat, pnt_lng)
def convert(self, value):
from haystack.utils.geo import ensure_point, Point
if value is None:
return None
if hasattr(value, 'geom_type'):
value = ensure_point(value)
return value
if isinstance(value, six.string_types):
lat, lng = value.split(',')
elif isinstance(value, (list, tuple)):
# GeoJSON-alike
lat, lng = value[1], value[0]
elif isinstance(value, dict):
lat = value.get('lat', 0)
lng = value.get('lon', 0)
value = Point(float(lng), float(lat))
return value
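    # e.g. convert("37.77,-122.42") and convert([-122.42, 37.77]) both yield
    # Point(-122.42, 37.77): latitude is parsed first from strings, but the
    # Point itself is constructed longitude-first (GeoJSON order).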
class NgramField(CharField):
field_type = 'ngram'
def __init__(self, **kwargs):
if kwargs.get('faceted') is True:
raise SearchFieldError("%s can not be faceted." % self.__class__.__name__)
super(NgramField, self).__init__(**kwargs)
class EdgeNgramField(NgramField):
field_type = 'edge_ngram'
class IntegerField(SearchField):
field_type = 'integer'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetIntegerField
super(IntegerField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(IntegerField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(SearchField):
field_type = 'float'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetFloatField
super(FloatField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(FloatField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDecimalField
super(DecimalField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(DecimalField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return six.text_type(value)
class BooleanField(SearchField):
field_type = 'boolean'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetBooleanField
super(BooleanField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(BooleanField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return bool(value)
class DateField(SearchField):
field_type = 'date'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateField
super(DateField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise SearchFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
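# e.g. convert("2010-11-01T00:00:00") returns datetime_safe.date(2010, 11, 1),
# assuming DATETIME_REGEX (defined earlier in this module) captures named
# year/month/day groups.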
class DateTimeField(SearchField):
field_type = 'datetime'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateTimeField
super(DateTimeField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second']))
else:
raise SearchFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
class MultiValueField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetMultiValueField
if kwargs.get('use_template') is True:
raise SearchFieldError("'%s' fields can not use templates to prepare their data." % self.__class__.__name__)
super(MultiValueField, self).__init__(**kwargs)
self.is_multivalued = True
def prepare(self, obj):
return self.convert(super(MultiValueField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return list(value)
class FacetField(SearchField):
"""
``FacetField`` is slightly different than the other fields because it can
work in conjunction with other fields as its data source.
Accepts an optional ``facet_for`` kwarg, which should be the field name
(not ``index_fieldname``) of the field it should pull data from.
"""
instance_name = None
def __init__(self, **kwargs):
handled_kwargs = self.handle_facet_parameters(kwargs)
super(FacetField, self).__init__(**handled_kwargs)
def handle_facet_parameters(self, kwargs):
if kwargs.get('faceted', False):
raise SearchFieldError("FacetField (%s) does not accept the 'faceted' argument." % self.instance_name)
if not kwargs.get('null', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'null' argument." % self.instance_name)
if not kwargs.get('indexed', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'indexed' argument." % self.instance_name)
if kwargs.get('facet_class'):
raise SearchFieldError("FacetField (%s) does not accept the 'facet_class' argument." % self.instance_name)
self.facet_for = None
self.facet_class = None
# Make sure the field is nullable.
kwargs['null'] = True
if 'facet_for' in kwargs:
self.facet_for = kwargs['facet_for']
del(kwargs['facet_for'])
return kwargs
def get_facet_for_name(self):
return self.facet_for or self.instance_name
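# Illustrative usage sketch (hypothetical index): facet fields are normally
# added automatically when a regular field is declared with ``faceted=True``,
# but ``facet_for`` lets a facet pull its data from another field:
#
#   author = indexes.CharField(model_attr='user')
#   author_exact = indexes.FacetCharField(facet_for='author')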
class FacetCharField(FacetField, CharField):
pass
class FacetIntegerField(FacetField, IntegerField):
pass
class FacetFloatField(FacetField, FloatField):
pass
class FacetDecimalField(FacetField, DecimalField):
pass
class FacetBooleanField(FacetField, BooleanField):
pass
class FacetDateField(FacetField, DateField):
pass
class FacetDateTimeField(FacetField, DateTimeField):
pass
class FacetMultiValueField(FacetField, MultiValueField):
pass
| |
#!/usr/bin/env python
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
=========
fMRI: FSL
=========
A workflow that uses fsl to perform a first level analysis on the nipype
tutorial data set::
python fmri_fsl.py
First tell python where to find the appropriate functions.
"""
from __future__ import print_function
from __future__ import division
from builtins import str
from builtins import range
import os # system functions
import nipype.interfaces.io as nio # Data i/o
import nipype.interfaces.fsl as fsl # fsl
import nipype.interfaces.utility as util # utility
import nipype.pipeline.engine as pe # pypeline engine
import nipype.algorithms.modelgen as model # model generation
import nipype.algorithms.rapidart as ra # artifact detection
"""
Preliminaries
-------------
Setup any package specific configuration. The output file format for FSL
routines is being set to compressed NIFTI.
"""
fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
"""
Setting up workflows
--------------------
In this tutorial we will be setting up a hierarchical workflow for fsl
analysis. This will demonstrate how pre-defined workflows can be set up and
shared across users, projects and labs.
Setup preprocessing workflow
----------------------------
This is a generic fsl feat preprocessing workflow encompassing skull stripping,
motion correction and smoothing operations.
"""
preproc = pe.Workflow(name='preproc')
"""
Set up a node to define all inputs required for the preprocessing workflow
"""
inputnode = pe.Node(interface=util.IdentityInterface(fields=['func',
'struct', ]),
name='inputspec')
"""
Convert functional images to float representation. Since there can be more than
one functional run we use a MapNode to convert each run.
"""
img2float = pe.MapNode(interface=fsl.ImageMaths(out_data_type='float',
op_string='',
suffix='_dtype'),
iterfield=['in_file'],
name='img2float')
preproc.connect(inputnode, 'func', img2float, 'in_file')
"""
Extract the middle volume of the first run as the reference
"""
extract_ref = pe.Node(interface=fsl.ExtractROI(t_size=1),
name='extractref')
"""
Define a function to pick the first file from a list of files
"""
def pickfirst(files):
if isinstance(files, list):
return files[0]
else:
return files
preproc.connect(img2float, ('out_file', pickfirst), extract_ref, 'in_file')
"""
Define a function to return the 0-based index of the middle volume (the
``t_min`` input of ExtractROI is 0-based)
"""
def getmiddlevolume(func):
from nibabel import load
funcfile = func
if isinstance(func, list):
funcfile = func[0]
_, _, _, timepoints = load(funcfile).shape
return int(timepoints / 2) - 1
preproc.connect(inputnode, ('func', getmiddlevolume), extract_ref, 't_min')
"""
Realign the functional runs to the middle volume of the first run
"""
motion_correct = pe.MapNode(interface=fsl.MCFLIRT(save_mats=True,
save_plots=True),
name='realign',
iterfield=['in_file'])
preproc.connect(img2float, 'out_file', motion_correct, 'in_file')
preproc.connect(extract_ref, 'roi_file', motion_correct, 'ref_file')
"""
Plot the estimated motion parameters
"""
plot_motion = pe.MapNode(interface=fsl.PlotMotionParams(in_source='fsl'),
name='plot_motion',
iterfield=['in_file'])
plot_motion.iterables = ('plot_type', ['rotations', 'translations'])
preproc.connect(motion_correct, 'par_file', plot_motion, 'in_file')
"""
Extract the mean volume of the first functional run
"""
meanfunc = pe.Node(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
name='meanfunc')
preproc.connect(motion_correct, ('out_file', pickfirst), meanfunc, 'in_file')
"""
Strip the skull from the mean functional to generate a mask
"""
meanfuncmask = pe.Node(interface=fsl.BET(mask=True,
no_output=True,
frac=0.3),
name='meanfuncmask')
preproc.connect(meanfunc, 'out_file', meanfuncmask, 'in_file')
"""
Mask the functional runs with the extracted mask
"""
maskfunc = pe.MapNode(interface=fsl.ImageMaths(suffix='_bet',
op_string='-mas'),
iterfield=['in_file'],
name='maskfunc')
preproc.connect(motion_correct, 'out_file', maskfunc, 'in_file')
preproc.connect(meanfuncmask, 'mask_file', maskfunc, 'in_file2')
"""
Determine the 2nd and 98th percentile intensities of each functional run
"""
getthresh = pe.MapNode(interface=fsl.ImageStats(op_string='-p 2 -p 98'),
iterfield=['in_file'],
name='getthreshold')
preproc.connect(maskfunc, 'out_file', getthresh, 'in_file')
"""
Threshold the first run of the functional data at 10% of the 98th percentile
"""
threshold = pe.Node(interface=fsl.ImageMaths(out_data_type='char',
suffix='_thresh'),
name='threshold')
preproc.connect(maskfunc, ('out_file', pickfirst), threshold, 'in_file')
"""
Define a function to get 10% of the intensity
"""
def getthreshop(thresh):
return '-thr %.10f -Tmin -bin' % (0.1 * thresh[0][1])
preproc.connect(getthresh, ('out_stat', getthreshop), threshold, 'op_string')
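# Note: `out_stat` from the getthreshold node is a list with one [p2, p98] pair
# per run, so thresh[0][1] is the 98th percentile of the first run; e.g.
# thresh = [[12.0, 850.0]] produces '-thr 85.0000000000 -Tmin -bin'.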
"""
Determine the median value of the functional runs using the mask
"""
medianval = pe.MapNode(interface=fsl.ImageStats(op_string='-k %s -p 50'),
iterfield=['in_file'],
name='medianval')
preproc.connect(motion_correct, 'out_file', medianval, 'in_file')
preproc.connect(threshold, 'out_file', medianval, 'mask_file')
"""
Dilate the mask
"""
dilatemask = pe.Node(interface=fsl.ImageMaths(suffix='_dil',
op_string='-dilF'),
name='dilatemask')
preproc.connect(threshold, 'out_file', dilatemask, 'in_file')
"""
Mask the motion corrected functional runs with the dilated mask
"""
maskfunc2 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file'],
name='maskfunc2')
preproc.connect(motion_correct, 'out_file', maskfunc2, 'in_file')
preproc.connect(dilatemask, 'out_file', maskfunc2, 'in_file2')
"""
Determine the mean image from each functional run
"""
meanfunc2 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='meanfunc2')
preproc.connect(maskfunc2, 'out_file', meanfunc2, 'in_file')
"""
Merge the median values with the mean functional images into a coupled list
"""
mergenode = pe.Node(interface=util.Merge(2, axis='hstack'),
name='merge')
preproc.connect(meanfunc2, 'out_file', mergenode, 'in1')
preproc.connect(medianval, 'out_stat', mergenode, 'in2')
"""
Smooth each run using SUSAN with the brightness threshold set to 75% of the
median value for each run and a mask constituting the mean functional
"""
smooth = pe.MapNode(interface=fsl.SUSAN(),
iterfield=['in_file', 'brightness_threshold', 'usans'],
name='smooth')
"""
Define a function to get the brightness threshold for SUSAN
"""
def getbtthresh(medianvals):
return [0.75 * val for val in medianvals]
def getusans(x):
return [[tuple([val[0], 0.75 * val[1]])] for val in x]
preproc.connect(maskfunc2, 'out_file', smooth, 'in_file')
preproc.connect(medianval, ('out_stat', getbtthresh), smooth, 'brightness_threshold')
preproc.connect(mergenode, ('out', getusans), smooth, 'usans')
"""
Mask the smoothed data with the dilated mask
"""
maskfunc3 = pe.MapNode(interface=fsl.ImageMaths(suffix='_mask',
op_string='-mas'),
iterfield=['in_file'],
name='maskfunc3')
preproc.connect(smooth, 'smoothed_file', maskfunc3, 'in_file')
preproc.connect(dilatemask, 'out_file', maskfunc3, 'in_file2')
"""
Scale each volume of the run so that the median value of the run is set to 10000
"""
intnorm = pe.MapNode(interface=fsl.ImageMaths(suffix='_intnorm'),
iterfield=['in_file', 'op_string'],
name='intnorm')
preproc.connect(maskfunc3, 'out_file', intnorm, 'in_file')
"""
Define a function to get the scaling factor for intensity normalization
"""
def getinormscale(medianvals):
return ['-mul %.10f' % (10000. / val) for val in medianvals]
preproc.connect(medianval, ('out_stat', getinormscale), intnorm, 'op_string')
"""
Perform temporal highpass filtering on the data
"""
highpass = pe.MapNode(interface=fsl.ImageMaths(suffix='_tempfilt'),
iterfield=['in_file'],
name='highpass')
preproc.connect(intnorm, 'out_file', highpass, 'in_file')
"""
Generate a mean functional image from the first run
"""
meanfunc3 = pe.MapNode(interface=fsl.ImageMaths(op_string='-Tmean',
suffix='_mean'),
iterfield=['in_file'],
name='meanfunc3')
preproc.connect(highpass, ('out_file', pickfirst), meanfunc3, 'in_file')
"""
Strip the structural image and coregister the mean functional image to the
structural image
"""
nosestrip = pe.Node(interface=fsl.BET(frac=0.3),
name='nosestrip')
skullstrip = pe.Node(interface=fsl.BET(mask=True),
name='stripstruct')
coregister = pe.Node(interface=fsl.FLIRT(dof=6),
name='coregister')
"""
Use :class:`nipype.algorithms.rapidart` to determine which of the
images in the functional series are outliers based on deviations in
intensity and/or movement.
"""
art = pe.MapNode(interface=ra.ArtifactDetect(use_differences=[True, False],
use_norm=True,
norm_threshold=1,
zintensity_threshold=3,
parameter_source='FSL',
mask_type='file'),
iterfield=['realigned_files', 'realignment_parameters'],
name="art")
preproc.connect([(inputnode, nosestrip, [('struct', 'in_file')]),
(nosestrip, skullstrip, [('out_file', 'in_file')]),
(skullstrip, coregister, [('out_file', 'in_file')]),
(meanfunc2, coregister, [(('out_file', pickfirst), 'reference')]),
(motion_correct, art, [('par_file', 'realignment_parameters')]),
(maskfunc2, art, [('out_file', 'realigned_files')]),
(dilatemask, art, [('out_file', 'mask_file')]),
])
"""
Set up model fitting workflow
-----------------------------
"""
modelfit = pe.Workflow(name='modelfit')
"""
Use :class:`nipype.algorithms.modelgen.SpecifyModel` to generate design information.
"""
modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
"""
Use :class:`nipype.interfaces.fsl.Level1Design` to generate a run specific fsf
file for analysis
"""
level1design = pe.Node(interface=fsl.Level1Design(), name="level1design")
"""
Use :class:`nipype.interfaces.fsl.FEATModel` to generate a run specific mat
file for use by FILMGLS
"""
modelgen = pe.MapNode(interface=fsl.FEATModel(), name='modelgen',
iterfield=['fsf_file', 'ev_files'])
"""
Use :class:`nipype.interfaces.fsl.FILMGLS` to estimate a model specified by a
mat file and a functional run
"""
modelestimate = pe.MapNode(interface=fsl.FILMGLS(smooth_autocorr=True,
mask_size=5,
threshold=1000),
name='modelestimate',
iterfield=['design_file', 'in_file'])
"""
Use :class:`nipype.interfaces.fsl.ContrastMgr` to generate contrast estimates
"""
conestimate = pe.MapNode(interface=fsl.ContrastMgr(), name='conestimate',
iterfield=['tcon_file', 'param_estimates',
'sigmasquareds', 'corrections',
'dof_file'])
modelfit.connect([
(modelspec, level1design, [('session_info', 'session_info')]),
(level1design, modelgen, [('fsf_files', 'fsf_file'),
('ev_files', 'ev_files')]),
(modelgen, modelestimate, [('design_file', 'design_file')]),
(modelgen, conestimate, [('con_file', 'tcon_file')]),
(modelestimate, conestimate, [('param_estimates', 'param_estimates'),
('sigmasquareds', 'sigmasquareds'),
('corrections', 'corrections'),
('dof_file', 'dof_file')]),
])
"""
Set up fixed-effects workflow
-----------------------------
"""
fixed_fx = pe.Workflow(name='fixedfx')
"""
Use :class:`nipype.interfaces.fsl.Merge` to merge the copes and
varcopes for each condition
"""
copemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
iterfield=['in_files'],
name="copemerge")
varcopemerge = pe.MapNode(interface=fsl.Merge(dimension='t'),
iterfield=['in_files'],
name="varcopemerge")
"""
Use :class:`nipype.interfaces.fsl.L2Model` to generate subject and condition
specific level 2 model design files
"""
level2model = pe.Node(interface=fsl.L2Model(),
name='l2model')
"""
Use :class:`nipype.interfaces.fsl.FLAMEO` to estimate a second level model
"""
flameo = pe.MapNode(interface=fsl.FLAMEO(run_mode='fe'), name="flameo",
iterfield=['cope_file', 'var_cope_file'])
fixed_fx.connect([(copemerge, flameo, [('merged_file', 'cope_file')]),
(varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
(level2model, flameo, [('design_mat', 'design_file'),
('design_con', 't_con_file'),
('design_grp', 'cov_split_file')]),
])
"""
Set up first-level workflow
---------------------------
"""
def sort_copes(files):
numelements = len(files[0])
outfiles = []
for i in range(numelements):
outfiles.insert(i, [])
for j, elements in enumerate(files):
outfiles[i].append(elements[i])
return outfiles
def num_copes(files):
return len(files)
firstlevel = pe.Workflow(name='firstlevel')
firstlevel.connect([(preproc, modelfit, [('highpass.out_file', 'modelspec.functional_runs'),
('art.outlier_files', 'modelspec.outlier_files'),
('highpass.out_file', 'modelestimate.in_file')]),
(preproc, fixed_fx, [('coregister.out_file', 'flameo.mask_file')]),
(modelfit, fixed_fx, [(('conestimate.copes', sort_copes), 'copemerge.in_files'),
(('conestimate.varcopes', sort_copes), 'varcopemerge.in_files'),
(('conestimate.copes', num_copes), 'l2model.num_copes'),
])
])
"""
Experiment specific components
------------------------------
The nipype tutorial contains data for two subjects. Subject data
is in two subdirectories, ``s1`` and ``s2``. Each subject directory
contains four functional volumes (f3.nii, f5.nii, f7.nii, f10.nii) and
one anatomical volume named struct.nii.
Below we set some variables to inform the ``datasource`` about the
layout of our data. We specify the location of the data, the subject
sub-directories and a dictionary that maps each run to a mnemonic (or
field) for the run type (``struct`` or ``func``). These fields become
the output fields of the ``datasource`` node in the pipeline.
In the example below, run 'f3' is of type 'func' and gets mapped to a
nifti filename through a template '%s.nii'. So 'f3' would become
'f3.nii'.
"""
# Specify the location of the data.
data_dir = os.path.abspath('data')
# Specify the subject directories
subject_list = ['s1'] # , 's3']
# Map field names to individual subject runs.
info = dict(func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]],
struct=[['subject_id', 'struct']])
infosource = pe.Node(interface=util.IdentityInterface(fields=['subject_id']),
name="infosource")
"""Here we set up iteration over all the subjects. The following line
is a particular example of the flexibility of the system. The
``infosource`` attribute ``iterables`` tells the pipeline engine that
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""
infosource.iterables = ('subject_id', subject_list)
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data. The
:class:`nipype.pipeline.engine.Node` class wraps the interface object
and provides additional housekeeping and pipeline-specific
functionality.
"""
datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
outfields=['func', 'struct']),
name='datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
Use the get_node function to retrieve an internal node by name. Then set the
iterables on this node to perform two different extents of smoothing.
"""
smoothnode = firstlevel.get_node('preproc.smooth')
assert(str(smoothnode) == 'preproc.smooth')
smoothnode.iterables = ('fwhm', [5., 10.])
hpcutoff = 120
TR = 3. # ensure float
firstlevel.inputs.preproc.highpass.suffix = '_hpf'
firstlevel.inputs.preproc.highpass.op_string = '-bptf %d -1' % (hpcutoff / TR)
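# fslmaths' -bptf takes the highpass and lowpass filter sigmas in volumes;
# passing -1 for the lowpass sigma disables lowpass filtering, so with
# hpcutoff=120 and TR=3 this becomes '-bptf 40 -1'.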
"""
Setup a function that returns subject-specific information about the
experimental paradigm. This is used by
:class:`nipype.algorithms.modelgen.SpecifyModel` to create the information
necessary to generate a first-level design matrix. In this tutorial, the same
paradigm was used for every participant. Other examples of this function are
available in the `doc/examples` folder. Note: Python knowledge required here.
"""
def subjectinfo(subject_id):
from nipype.interfaces.base import Bunch
from copy import deepcopy
print("Subject ID: %s\n" % str(subject_id))
output = []
names = ['Task-Odd', 'Task-Even']
for r in range(4):
onsets = [list(range(15, 240, 60)), list(range(45, 240, 60))]
output.insert(r,
Bunch(conditions=names,
onsets=deepcopy(onsets),
durations=[[15] for s in names],
amplitudes=None,
tmod=None,
pmod=None,
regressor_names=None,
regressors=None))
return output
"""
Setup the contrast structure that needs to be evaluated. This is a list of
lists. The inner list specifies the contrasts and has the following format:
[Name, Stat, [list of condition names], [weights on those conditions]]. The
condition names must match the `names` listed in the `subjectinfo` function
described above.
"""
cont1 = ['Task>Baseline', 'T', ['Task-Odd', 'Task-Even'], [0.5, 0.5]]
cont2 = ['Task-Odd>Task-Even', 'T', ['Task-Odd', 'Task-Even'], [1, -1]]
cont3 = ['Task', 'F', [cont1, cont2]]
contrasts = [cont1, cont2]
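# Note that cont3, an F contrast over cont1 and cont2, is defined above for
# illustration but not included in the `contrasts` list that is actually fitted.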
firstlevel.inputs.modelfit.modelspec.input_units = 'secs'
firstlevel.inputs.modelfit.modelspec.time_repetition = TR
firstlevel.inputs.modelfit.modelspec.high_pass_filter_cutoff = hpcutoff
firstlevel.inputs.modelfit.level1design.interscan_interval = TR
firstlevel.inputs.modelfit.level1design.bases = {'dgamma': {'derivs': False}}
firstlevel.inputs.modelfit.level1design.contrasts = contrasts
firstlevel.inputs.modelfit.level1design.model_serial_correlations = True
"""
Set up complete workflow
========================
"""
l1pipeline = pe.Workflow(name="level1")
l1pipeline.base_dir = os.path.abspath('./fsl/workingdir')
l1pipeline.config = {"execution": {"crashdump_dir": os.path.abspath('./fsl/crashdumps')}}
l1pipeline.connect([(infosource, datasource, [('subject_id', 'subject_id')]),
(infosource, firstlevel, [(('subject_id', subjectinfo), 'modelfit.modelspec.subject_info')]),
(datasource, firstlevel, [('struct', 'preproc.inputspec.struct'),
('func', 'preproc.inputspec.func'),
]),
])
"""
Execute the pipeline
--------------------
The code discussed above sets up all the necessary data structures with
appropriate parameters and the connectivity between the processes, but does not
generate any output. To actually run the analysis on the data the
:class:`nipype.pipeline.engine.Workflow` ``run`` method needs to be called.
"""
if __name__ == '__main__':
l1pipeline.write_graph()
outgraph = l1pipeline.run()
# l1pipeline.run(plugin='MultiProc', plugin_args={'n_procs':2})
| |
"""
A dataset that applies a transformation on the fly as examples
are requested.
"""
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from theano.compat.six import Iterator
from pylearn2.datasets.dataset import Dataset
from pylearn2.space import CompositeSpace
from pylearn2.utils import safe_zip
from pylearn2.utils.data_specs import is_flat_specs
from pylearn2.utils import wraps
class TransformerDataset(Dataset):
"""
A dataset that applies a transformation on the fly
as examples are requested.
"""
def __init__(self, raw, transformer, cpu_only=False,
space_preserving=False):
"""
.. todo::
WRITEME properly
Parameters
----------
raw : pylearn2 Dataset
Provides raw data
transformer: pylearn2 Block
To transform the data
"""
self.__dict__.update(locals())
del self.self
def get_batch_design(self, batch_size, include_labels=False):
"""
.. todo::
WRITEME
"""
raw = self.raw.get_batch_design(batch_size, include_labels)
if include_labels:
X, y = raw
else:
X = raw
X = self.transformer.perform(X)
if include_labels:
return X, y
return X
def get_test_set(self):
"""
.. todo::
WRITEME
"""
return TransformerDataset(raw=self.raw.get_test_set(),
transformer=self.transformer,
cpu_only=self.cpu_only,
space_preserving=self.space_preserving)
def get_batch_topo(self, batch_size):
"""
If the transformer has changed the space, we don't have a good
idea of how to do topology in the new space.
If the transformer just changes the values in the original space,
we can have the raw dataset provide the topology.
"""
X = self.get_batch_design(batch_size)
if self.space_preserving:
return self.raw.get_topological_view(X)
return X.reshape(X.shape[0], X.shape[1], 1, 1)
def iterator(self, mode=None, batch_size=None, num_batches=None,
rng=None, data_specs=None,
return_tuple=False):
"""
.. todo::
WRITEME
"""
# Build the right data_specs to query self.raw
if data_specs is not None:
assert is_flat_specs(data_specs)
space, source = data_specs
if not isinstance(source, tuple):
source = (source,)
if isinstance(space, CompositeSpace):
space = tuple(space.components)
else:
space = (space,)
# Put 'features' first, as this is what TransformerIterator
# is expecting
transform = False
if hasattr(self.raw, 'conv_sources'):
for so in self.raw.conv_sources:
if so in source:
transform = True
elif 'features' in source:
transform = True
if not transform:
                # 'features' is not needed; get things directly from
                # the original data
raw_data_specs = data_specs
else:
if hasattr(self.raw, 'conv_sources'):
                    feature_idx = source.index(self.raw.conv_sources[0])
else:
feature_idx = source.index('features')
if self.space_preserving:
# Ask self.raw for the data in the expected space,
# and self.transformer will operate in that space
feature_input_space = space[feature_idx]
else:
# We need to ask the transformer what its input space is
feature_input_space = self.transformer.get_input_space()
if hasattr(self.raw, 'conv_sources'):
raw_space = [feature_input_space if so in
self.raw.conv_sources else sp for sp,so in safe_zip(space, source)]
else:
raw_space = [feature_input_space if so == 'features'
else sp for sp,so in safe_zip(space, source)]
raw_space = CompositeSpace(raw_space)
raw_source = source
raw_data_specs = (raw_space, raw_source)
else:
raw_data_specs = None
raw_iterator = self.raw.iterator(
mode=mode, batch_size=batch_size,
num_batches=num_batches, rng=rng,
data_specs=raw_data_specs, return_tuple=return_tuple)
final_iterator = TransformerIterator(raw_iterator, self,
data_specs=data_specs)
return final_iterator
def has_targets(self):
"""
.. todo::
WRITEME
"""
return self.raw.has_targets()
def adjust_for_viewer(self, X):
"""
.. todo::
WRITEME
"""
if self.space_preserving:
return self.raw.adjust_for_viewer(X)
return X
def get_weights_view(self, *args, **kwargs):
"""
.. todo::
WRITEME
"""
if self.space_preserving:
return self.raw.get_weights_view(*args, **kwargs)
raise NotImplementedError()
def get_topological_view(self, *args, **kwargs):
"""
.. todo::
WRITEME
"""
if self.space_preserving:
            return self.raw.get_topological_view(*args, **kwargs)
raise NotImplementedError()
def adjust_to_be_viewed_with(self, *args, **kwargs):
"""
.. todo::
WRITEME
"""
return self.raw.adjust_to_be_viewed_with(*args, **kwargs)
@wraps(Dataset.get_num_examples)
def get_num_examples(self):
return self.raw.get_num_examples()
class TransformerIterator(Iterator):
"""
.. todo::
WRITEME
"""
def __init__(self, raw_iterator, transformer_dataset, data_specs):
"""
.. todo::
WRITEME
"""
self.raw_iterator = raw_iterator
self.transformer_dataset = transformer_dataset
self.stochastic = raw_iterator.stochastic
self.uneven = raw_iterator.uneven
self.data_specs = data_specs
def __iter__(self):
"""
.. todo::
WRITEME
"""
return self
def __next__(self):
"""
.. todo::
WRITEME
"""
        raw_batch = next(self.raw_iterator)
# Apply transformation on raw_batch, and format it
# in the requested Space
transformer = self.transformer_dataset.transformer
transformer.set_rand()
out_space, source = self.data_specs
if isinstance(out_space, CompositeSpace):
out_space = out_space.components[0]
if self.transformer_dataset.space_preserving:
# If the space is preserved, then raw_batch is already provided
# in the requested space
rval_space = out_space
else:
rval_space = transformer.get_output_space()
def transform(X_batch):
rval = transformer.perform(X_batch)
if rval_space != out_space:
rval = rval_space.np_format_as(rval, out_space)
return rval
if not isinstance(raw_batch, tuple):
# Only one source, return_tuple is False
rval = transform(raw_batch)
else:
# Apply the transformer only on the first element
if hasattr(self.transformer_dataset.raw, 'conv_sources'):
rval = tuple([transform(batch) if so in
self.transformer_dataset.raw.conv_sources
else batch for so,batch in safe_zip(source,raw_batch)])
else:
rval = tuple([transform(batch) if so == 'features'
else batch for so,batch in safe_zip(source,raw_batch)])
return rval
@property
def num_examples(self):
"""
.. todo::
WRITEME
"""
return self.raw_iterator.num_examples
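# --- Illustrative usage sketch (not part of this module) ---------------------
# A minimal, hypothetical example of wrapping a raw dataset with a block;
# `DenseDesignMatrix` stands in for any pylearn2 Dataset and `encoder` for any
# trained pylearn2 Block:
#
#   import numpy as np
#   from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
#
#   raw = DenseDesignMatrix(X=np.random.rand(100, 784).astype('float32'))
#   dataset = TransformerDataset(raw=raw, transformer=encoder,
#                                space_preserving=False)
#   batch = dataset.get_batch_design(20)   # transformed on the fly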
| |
#!/usr/bin/python
#
# occi_api.py - common functions, classes, and variables for Vcycle
#
# THIS FILE NEEDS UPDATING FOR Vcycle 3.0 CHANGES!
#
# Andrew McNab, University of Manchester.
# Luis Villazon Esteban, CERN.
# Copyright (c) 2013-5. All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# o Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
# o Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
# TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Contacts: Andrew.McNab@cern.ch http://www.gridpp.ac.uk/vcycle/
# Luis.Villazon.Esteban@cern.ch
#
import requests
import time
import base64
import vcycle.vacutils
class OcciError(Exception):
pass
ca_path = '/etc/grid-security/occi.ca-certs'
class OcciSpace(vcycle.BaseSpace):
def __init__(self, api, apiVersion, spaceName, parser, spaceSectionName, updatePipes):
# Initialize data structures from configuration files
# Generic initialization
vcycle.BaseSpace.__init__(self, api, apiVersion, spaceName, parser, spaceSectionName, updatePipes)
# OCCI-specific initialization
try:
self.tenancy_name = parser.get(spaceSectionName, 'tenancy_name')
except Exception as e:
raise OcciError('tenancy_name is required in Occi [space ' + spaceName + '] (' + str(e) + ')')
try:
self.queryURL = parser.get(spaceSectionName, 'url')
except Exception as e:
raise OcciError('url is required in Occi [space ' + spaceName + '] (' + str(e) + ')')
if self.queryURL.endswith('/'):
self.queryURL = self.queryURL[:-1]
    # Check if a proxy is in the configuration; if not, fall back to username/password.
    # Initialise all credential attributes so later code (e.g. _get_token) can
    # safely test them regardless of which branch is taken.
    self.userkey  = None
    self.usercert = None
    self.username = None
    self.password = None
    if parser.has_option(spaceSectionName, 'proxy'):
      self.userkey = parser.get(spaceSectionName, 'proxy')
      self.usercert = parser.get(spaceSectionName, 'proxy')
    else:
#check username and password are defined
if not parser.has_option(spaceSectionName, 'username'):
raise OcciError('username is required in Occi [space %s]' % spaceName)
if not parser.has_option(spaceSectionName, 'password'):
raise OcciError('password is required in Occi [space %s]' % spaceName)
self.username = parser.get(spaceSectionName, 'username')
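      # The password is stored obfuscated in the configuration file: each
      # character has been shifted up by one code point, so shifting each one
      # back down recovers the plain-text password.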
self.password = ''.join([ chr(ord(c)-1) for c in parser.get(spaceSectionName, 'password')])
self._create_ca_file()
def connect(self):
# Connect to the OCCI service
self.session = requests.Session()
self.session.mount(self.queryURL, requests.adapters.HTTPAdapter(pool_connections=20))
#Retrieve token
keystone_url = self._get_keystone()
if keystone_url is not None:
vcycle.vacutils.logLine("Found Keystone URL %s" % keystone_url)
self.token = self._get_token(keystone_url)
self.session.headers.clear()
self.session.headers.update({"X-Auth-Token": self.token})
self.session.cert = self.usercert
self.session.verify = ca_path
self._get_definitions()
self.computeURL = "%s/compute/" % (self.queryURL)
vcycle.vacutils.logLine("Connected to %s for space %s" % (self.queryURL ,self.spaceName))
def scanMachines(self):
"""Query OCCI compute service for details of machines in this space"""
headers = {'Accept': 'application/occi+json',
'Content-Type': 'application/occi+json'}
try:
response = self.session.get(self.computeURL)
except Exception as e:
raise OcciError('Cannot connect to ' + self.computeURL + ' (' + str(e) + ')')
# Convert machines from None to an empty dictionary since we successfully connected
self.machines = {}
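    # The OCCI compute collection is returned as one URL per line (after a
    # leading header line); the machine ID is the final path segment of each URL.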
for machineID in [line[line.rfind('/')+1:] for line in response.text.split("\n")[1:]]:
try:
response = self.session.get("%s/%s" % (self.computeURL, machineID), headers=headers)
except Exception as e:
raise OcciError('Cannot connect to %s/%s (%s)' %(self.computeURL, machineID, str(e)))
response = response.json()
machineName = response['attributes']['occi.compute.hostname']
occiState = response['attributes']['org.openstack.compute.state'].lower()
uuidStr = response['attributes']['occi.core.id']
try:
ip = response['links'][0]['attributes']['occi.networkinterface.address']
except:
ip = '0.0.0.0'
# Just in case other VMs are in this space
if machineName[:7] != 'vcycle-':
# Still count VMs that we didn't create and won't manage, to avoid going above space limit
self.totalProcessors += 1 # FIXME: GET THE REAL NUMBER NOT JUST 1
continue
# With OCCI will have to use our file datestamps to get transition times
try:
createdTime = int(open('/var/lib/vcycle/machines/' + machineName + '/started', 'r').read().strip())
updatedTime = int(open('/var/lib/vcycle/machines/' + machineName + '/started', 'r').read().strip())
startedTime = int(open('/var/lib/vcycle/machines/' + machineName + '/started', 'r').read().strip())
except:
createdTime = None
updatedTime = None
startedTime = None
if occiState == 'active':
state = vcycle.MachineState.running
elif occiState == 'inactive':
state = vcycle.MachineState.shutdown
else:
state = vcycle.MachineState.unknown
self.machines[machineName] = vcycle.shared.Machine(name=machineName,
spaceName=self.spaceName,
state=state,
ip=ip,
createdTime=createdTime,
startedTime=startedTime,
updatedTime=updatedTime,
uuidStr=uuidStr,
machinetypeName=None)
def createMachine(self, machineName, machinetypeName, zone = None):
# OCCI-specific machine creation steps
# 'metadata' : { 'cern-services' : 'false',
# 'machinefeatures' : 'http://' + os.uname()[1] + '/' + machineName + '/machinefeatures',
# 'jobfeatures' : 'http://' + os.uname()[1] + '/' + machineName + '/jobfeatures',
# 'machineoutputs' : 'https://' + os.uname()[1] + '/' + machineName + '/machineoutputs' }
import uuid
headers = {'X-Auth-Token': self.token,
'Accept': 'text/plain,text/occi',
'Content-Type': 'text/plain,text/occi',
'Connection': 'close'
}
image = self.machinetypes[machinetypeName].root_image[6:].strip()
data = 'Category: compute;scheme="http://schemas.ogf.org/occi/infrastructure#";class="kind";location="/compute/";title="Compute Resource"\n'
data += 'Category: %s;%s;class="mixin";location="/%s"\n' % (image, self.categories[image]['scheme'], image)
data += 'Category: %s;%s;class="mixin";location="/%s"\n' % (self.machinetypes[machinetypeName].flavor_names[0], self.categories[self.machinetypes[machinetypeName].flavor_names[0]]['scheme'], self.machinetypes[machinetypeName].flavor_names[0])
    data += 'Category: user_data;"%s";class="mixin";location="%s";title="OS contextualization mixin"\n' % (self.categories['user_data']['scheme'], self.categories['user_data']['location'])
data += 'X-OCCI-Attribute: occi.core.id="%s"\n' % str(uuid.uuid4())
data += 'X-OCCI-Attribute: occi.core.title="%s"\n' % machineName
data += 'X-OCCI-Attribute: occi.compute.hostname="%s"\n' % machineName
data += 'X-OCCI-Attribute: org.openstack.compute.user_data="%s"' % base64.b64encode(open('/var/lib/vcycle/machines/' + machineName + '/user_data', 'r').read())
if self.machinetypes[machinetypeName].root_public_key:
      if self.machinetypes[machinetypeName].root_public_key[0] == '/':
        try:
          f = open(self.machinetypes[machinetypeName].root_public_key, 'r')
        except Exception as e:
          raise OcciError('Cannot open ' + self.machinetypes[machinetypeName].root_public_key + ' (' + str(e) + ')')
      else:
        try:
          f = open('/var/lib/vcycle/' + self.spaceName + '/' + machinetypeName + '/' + self.machinetypes[machinetypeName].root_public_key, 'r')
        except Exception as e:
          raise OcciError('Cannot open ' + self.spaceName + '/' + machinetypeName + '/' + self.machinetypes[machinetypeName].root_public_key + ' (' + str(e) + ')')
      # Read the key file line by line; raise if we reach EOF without finding a key
      while True:
        line = f.readline()
        if not line:
          raise OcciError('Cannot find ssh-rsa public key line in ' + self.machinetypes[machinetypeName].root_public_key)
        if line[:8] == 'ssh-rsa ':
          sshPublicKey = line.split(' ')[1]
          data += 'X-OCCI-Attribute: org.openstack.credentials.publickey.data="ssh-rsa ' + sshPublicKey + ' vcycle"'
          break
try:
response = self.session.post(self.computeURL, data=data, headers=headers)
if response.status_code not in [200, 201]:
raise OcciError(response.text)
except Exception as e:
raise OcciError('Cannot connect to ' + self.computeURL + ' (' + str(e) + ')')
vcycle.vacutils.logLine('Created ' + machineName + ' for ' + machinetypeName + ' within ' + self.spaceName)
self.machines[machineName] = vcycle.shared.Machine(name=machineName,
spaceName=self.spaceName,
state=vcycle.MachineState.starting,
ip='0.0.0.0',
createdTime=int(time.time()),
startedTime=int(time.time()),
updatedTime=int(time.time()),
uuidStr=None,
machinetypeName=machinetypeName)
return machineName
def deleteOneMachine(self, machineName):
"""Deletes a VM from the provider
:param machineName: vm identifier
"""
try:
self.session.delete("%s%s" % (self.computeURL, self.machines[machineName].uuidStr))
except Exception as e:
raise vcycle.shared.VcycleError('Cannot delete ' + machineName + ' via ' + self.computeURL + ' (' + str(e) + ')')
def _get_definitions(self):
"""Store the schema definitions to create VMs
"""
headers = {'X-Auth-Token': self.token,
'Accept': 'text/plain,text/occi'}
response = requests.get("%s/-/" % self.queryURL,
headers=headers,
cert=self.usercert,
verify=ca_path)
self.categories = {}
categories = response.text.split("\n")[1:]
for category in categories:
values = category.split(";")
cat = values[0][values[0].find(":")+1:].strip()
self.categories[cat] = {}
for property in values:
if property.find("scheme=") >= 0:
self.categories[cat]["scheme"] = property.strip()
if property.find("class=") >= 0:
self.categories[cat]["class"] = property.strip()
if property.find("title=") >= 0:
self.categories[cat]["title"] = property.strip()
if property.find("location=") >= 0:
aux = property.strip()
aux = aux.replace("https://","")
aux = aux.replace("http://","")
aux = aux[aux.find("/"):]
self.categories[cat]["location"] = 'location="'+aux
def _get_keystone(self):
""" Returns The authorization token to retrieve the OCCI token
:return: The keystone url
"""
try:
result = requests.head(self.queryURL + '/-/',
headers={"Content-Type": "application/json"},
cert=self.usercert,
verify=ca_path
)
except Exception as e:
raise OcciError('Cannot connect to ' + self.queryURL + ' (' + str(e) + ')')
# This is implicitly only for Keystone authentication
if result.status_code != 401 or result.headers is None:
raise OcciError('Do not recognise response when connecting to ' + self.queryURL)
if 'www-authenticate' not in result.headers:
return None
# Explicitly check for Keystone using hard-coded string index values for now
if not result.headers['www-authenticate'].startswith("Keystone uri="):
raise OcciError('Only Keystone authentication is currently supported (instead got "%s")' %
result.headers['www-authenticate'])
try:
keystoneURL = result.headers['www-authenticate'][14:-1]
keystoneURL = keystoneURL.replace("/v2.0", '')
except:
raise OcciError("Failed to find Keystone URL in %s" % result.headers['www-authenticate'])
return keystoneURL
def _get_token(self, keystone_url):
""" Returns The token to request OCCI site
:param keystone_url: URL to do the request
:return: The token
"""
if self.userkey is not None:
auth = {'auth': {'voms': True}}
else:
auth = {'auth': {
'passwordCredentials': {
'username': self.username,
'password': self.password
}
}
}
    import json
    try:
      result = {'response': requests.post(keystone_url + "/v2.0/tokens",
                                          data=json.dumps(auth),
                                          headers={"Content-Type": "application/json"},
                                          cert=self.usercert, verify=ca_path).json()}
except Exception as e:
raise OcciError('Cannot connect to ' + keystone_url + ' (' + str(e) + ')')
token = str(result['response']['access']['token']['id'])
tenants = self._get_tenants(keystone_url, token)
return self.__auth_in_tenant(keystone_url, token, tenants)
def _get_tenants(self, keystone_url, temporal_token):
""" Returns all the tenants available in the provider
:param token: Authorization token
:return: The name of all tenants
"""
    result = {'response': requests.get("%s/v2.0/tenants/" % keystone_url,
                                       headers={"Content-Type": "application/json", "X-Auth-Token": temporal_token},
                                       cert=self.usercert,
                                       verify=ca_path).json()}
return [tenant['name'] for tenant in result['response']['tenants']]
def __auth_in_tenant(self, keystone_url, token, tenants):
""" Returns the token linked to the tenant
    Loops over all tenants, trying to authorize the user with each one; once a tenant is valid, a token is returned
:param token: System token
:param tenants: list of tenants
:return: token and expiration date
"""
import json
for tenant in tenants:
data = {'auth': {'voms': True, 'tenantName': tenant}}
headers = {
'Accept': 'application/json',
'X-Auth-Token': token,
'User-Agent': 'Vcycle ' + vcycle.shared.vcycleVersion + ' ( OCCI/1.1 )',
'Content-Type': 'application/json',
'Content-Length': len(json.dumps(data))
}
try:
result = {'response': requests.post("%s/v2.0/tokens" % keystone_url,
data=json.dumps(data),
headers= headers,
cert=self.usercert,
verify=ca_path).json()}
      except Exception as e:
        # Authentication against this tenant failed; log it and try the next one
        vcycle.vacutils.logLine('Failed to get token for tenant ' + tenant + ' (' + str(e) + ')')
        continue
      if 'access' in result['response']:
        return result['response']['access']['token']['id']
def _create_ca_file(self):
import subprocess
import os.path
if not os.path.exists(ca_path):
subprocess.call('cat `ls /etc/grid-security/certificates/*.pem` > %s' % ca_path,
shell=True)
else:
modification_time = os.lstat(ca_path).st_mtime
for file in os.listdir('/etc/grid-security/certificates/'):
if os.lstat('/etc/grid-security/certificates/%s' % file).st_mtime > modification_time:
subprocess.call('cat `ls /etc/grid-security/certificates/*.pem` > %s' % ca_path,
shell=True)
return
| |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
import collections.abc
import functools
import inspect
from typing import Union, List, Type, Callable, TypeVar, Generic
from pyflink.java_gateway import get_gateway
from pyflink.metrics import MetricGroup
from pyflink.table import Expression
from pyflink.table.types import DataType, _to_java_type, _to_java_data_type
from pyflink.util import utils
__all__ = ['FunctionContext', 'AggregateFunction', 'ScalarFunction', 'TableFunction',
'udf', 'udtf', 'udaf']
class FunctionContext(object):
"""
Used to obtain global runtime information about the context in which the
user-defined function is executed. The information includes the metric group,
and global job parameters, etc.
"""
def __init__(self, base_metric_group):
self._base_metric_group = base_metric_group
def get_metric_group(self) -> MetricGroup:
"""
Returns the metric group for this parallel subtask.
.. versionadded:: 1.11.0
"""
if self._base_metric_group is None:
raise RuntimeError("Metric has not been enabled. You can enable "
"metric with the 'python.metric.enabled' configuration.")
return self._base_metric_group
class UserDefinedFunction(abc.ABC):
"""
Base interface for user-defined function.
.. versionadded:: 1.10.0
"""
def open(self, function_context: FunctionContext):
"""
Initialization method for the function. It is called before the actual working methods
and thus suitable for one time setup work.
:param function_context: the context of the function
:type function_context: FunctionContext
"""
pass
def close(self):
"""
Tear-down method for the user code. It is called after the last call to the main
working methods.
"""
pass
def is_deterministic(self) -> bool:
"""
Returns information about the determinism of the function's results.
It returns true if and only if a call to this function is guaranteed to
always return the same result given the same parameters. true is assumed by default.
If the function is not pure functional like random(), date(), now(),
this method must return false.
:return: the determinism of the function's results.
"""
return True
class ScalarFunction(UserDefinedFunction):
"""
    Base interface for user-defined scalar function. A user-defined scalar function maps zero,
    one, or multiple scalar values to a new scalar value.
.. versionadded:: 1.10.0
"""
@abc.abstractmethod
def eval(self, *args):
"""
Method which defines the logic of the scalar function.
"""
pass
class TableFunction(UserDefinedFunction):
"""
    Base interface for user-defined table function. A user-defined table function maps zero, one,
    or multiple scalar values to zero, one, or multiple rows.
.. versionadded:: 1.11.0
"""
@abc.abstractmethod
def eval(self, *args):
"""
Method which defines the logic of the table function.
"""
pass
T = TypeVar('T')
ACC = TypeVar('ACC')
class AggregateFunction(UserDefinedFunction, Generic[T, ACC]):
"""
Base interface for user-defined aggregate function. A user-defined aggregate function maps
scalar values of multiple rows to a new scalar value.
.. versionadded:: 1.12.0
"""
@abc.abstractmethod
def get_value(self, accumulator: ACC) -> T:
"""
Called every time when an aggregation result should be materialized. The returned value
could be either an early and incomplete result (periodically emitted as data arrives) or
the final result of the aggregation.
:param accumulator: the accumulator which contains the current intermediate results
:return: the aggregation result
"""
pass
@abc.abstractmethod
def create_accumulator(self) -> ACC:
"""
Creates and initializes the accumulator for this AggregateFunction.
:return: the accumulator with the initial value
"""
pass
@abc.abstractmethod
def accumulate(self, accumulator: ACC, *args):
"""
Processes the input values and updates the provided accumulator instance.
:param accumulator: the accumulator which contains the current aggregated results
:param args: the input value (usually obtained from new arrived data)
"""
pass
def retract(self, accumulator: ACC, *args):
"""
        Retracts the input values from the accumulator instance. The current design assumes the
inputs are the values that have been previously accumulated.
:param accumulator: the accumulator which contains the current aggregated results
:param args: the input value (usually obtained from new arrived data).
"""
pass
def merge(self, accumulator: ACC, accumulators):
"""
Merges a group of accumulator instances into one accumulator instance. This method must be
implemented for unbounded session window grouping aggregates and bounded grouping
aggregates.
:param accumulator: the accumulator which will keep the merged aggregate results. It should
be noted that the accumulator may contain the previous aggregated
results. Therefore user should not replace or clean this instance in the
custom merge method.
:param accumulators: a group of accumulators that will be merged.
"""
pass
def get_result_type(self) -> DataType:
"""
Returns the DataType of the AggregateFunction's result.
:return: The :class:`~pyflink.table.types.DataType` of the AggregateFunction's result.
"""
pass
def get_accumulator_type(self) -> DataType:
"""
Returns the DataType of the AggregateFunction's accumulator.
:return: The :class:`~pyflink.table.types.DataType` of the AggregateFunction's accumulator.
"""
pass
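# --- Illustrative sketch (not part of this module) ----------------------------
# A minimal Pandas aggregate built with the ``udaf`` helper defined later in
# this module; names are hypothetical:
#
#   from pyflink.table import DataTypes
#   from pyflink.table.udf import udaf
#
#   @udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
#   def mean_udaf(v):
#       return v.mean()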
class DelegatingScalarFunction(ScalarFunction):
"""
Helper scalar function implementation for lambda expression and python function. It's for
internal use only.
"""
def __init__(self, func):
self.func = func
def eval(self, *args):
return self.func(*args)
class DelegationTableFunction(TableFunction):
"""
Helper table function implementation for lambda expression and python function. It's for
internal use only.
"""
def __init__(self, func):
self.func = func
def eval(self, *args):
return self.func(*args)
class DelegatingPandasAggregateFunction(AggregateFunction):
"""
Helper pandas aggregate function implementation for lambda expression and python function.
It's for internal use only.
"""
def __init__(self, func):
self.func = func
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return []
def accumulate(self, accumulator, *args):
accumulator.append(self.func(*args))
class PandasAggregateFunctionWrapper(object):
"""
Wrapper for Pandas Aggregate function.
"""
def __init__(self, func: AggregateFunction):
self.func = func
def open(self, function_context: FunctionContext):
self.func.open(function_context)
def eval(self, *args):
accumulator = self.func.create_accumulator()
self.func.accumulate(accumulator, *args)
return self.func.get_value(accumulator)
def close(self):
self.func.close()
class UserDefinedFunctionWrapper(object):
"""
Base Wrapper for Python user-defined function. It handles things like converting lambda
functions to user-defined functions, creating the Java user-defined function representation,
etc. It's for internal use only.
"""
def __init__(self, func, input_types, func_type, deterministic=None, name=None):
if inspect.isclass(func) or (
not isinstance(func, UserDefinedFunction) and not callable(func)):
raise TypeError(
"Invalid function: not a function or callable (__call__ is not defined): {0}"
.format(type(func)))
if input_types is not None:
from pyflink.table.types import RowType
            if not isinstance(input_types, collections.abc.Iterable) \
                    or isinstance(input_types, RowType):
input_types = [input_types]
for input_type in input_types:
if not isinstance(input_type, DataType):
raise TypeError(
"Invalid input_type: input_type should be DataType but contains {}".format(
input_type))
self._func = func
self._input_types = input_types
self._name = name or (
func.__name__ if hasattr(func, '__name__') else func.__class__.__name__)
if deterministic is not None and isinstance(func, UserDefinedFunction) and deterministic \
!= func.is_deterministic():
raise ValueError("Inconsistent deterministic: {} and {}".format(
deterministic, func.is_deterministic()))
# default deterministic is True
self._deterministic = deterministic if deterministic is not None else (
func.is_deterministic() if isinstance(func, UserDefinedFunction) else True)
self._func_type = func_type
self._judf_placeholder = None
def __call__(self, *args) -> Expression:
from pyflink.table import expressions as expr
return expr.call(self, *args)
def java_user_defined_function(self):
if self._judf_placeholder is None:
gateway = get_gateway()
def get_python_function_kind():
JPythonFunctionKind = gateway.jvm.org.apache.flink.table.functions.python. \
PythonFunctionKind
if self._func_type == "general":
return JPythonFunctionKind.GENERAL
elif self._func_type == "pandas":
return JPythonFunctionKind.PANDAS
else:
raise TypeError("Unsupported func_type: %s." % self._func_type)
if self._input_types is not None:
j_input_types = utils.to_jarray(
gateway.jvm.TypeInformation, [_to_java_type(i) for i in self._input_types])
else:
j_input_types = None
j_function_kind = get_python_function_kind()
func = self._func
if not isinstance(self._func, UserDefinedFunction):
func = self._create_delegate_function()
import cloudpickle
serialized_func = cloudpickle.dumps(func)
self._judf_placeholder = \
self._create_judf(serialized_func, j_input_types, j_function_kind)
return self._judf_placeholder
def _create_delegate_function(self) -> UserDefinedFunction:
pass
def _create_judf(self, serialized_func, j_input_types, j_function_kind):
pass
class UserDefinedScalarFunctionWrapper(UserDefinedFunctionWrapper):
"""
Wrapper for Python user-defined scalar function.
"""
def __init__(self, func, input_types, result_type, func_type, deterministic, name):
super(UserDefinedScalarFunctionWrapper, self).__init__(
func, input_types, func_type, deterministic, name)
if not isinstance(result_type, DataType):
raise TypeError(
"Invalid returnType: returnType should be DataType but is {}".format(result_type))
self._result_type = result_type
self._judf_placeholder = None
def _create_judf(self, serialized_func, j_input_types, j_function_kind):
gateway = get_gateway()
j_result_type = _to_java_type(self._result_type)
PythonScalarFunction = gateway.jvm \
.org.apache.flink.table.functions.python.PythonScalarFunction
j_scalar_function = PythonScalarFunction(
self._name,
bytearray(serialized_func),
j_input_types,
j_result_type,
j_function_kind,
self._deterministic,
_get_python_env())
return j_scalar_function
def _create_delegate_function(self) -> UserDefinedFunction:
return DelegatingScalarFunction(self._func)
class UserDefinedTableFunctionWrapper(UserDefinedFunctionWrapper):
"""
Wrapper for Python user-defined table function.
"""
def __init__(self, func, input_types, result_types, deterministic=None, name=None):
super(UserDefinedTableFunctionWrapper, self).__init__(
func, input_types, "general", deterministic, name)
from pyflink.table.types import RowType
        if not isinstance(result_types, collections.abc.Iterable) \
                or isinstance(result_types, RowType):
result_types = [result_types]
for result_type in result_types:
if not isinstance(result_type, DataType):
raise TypeError(
"Invalid result_type: result_type should be DataType but contains {}".format(
result_type))
self._result_types = result_types
def _create_judf(self, serialized_func, j_input_types, j_function_kind):
gateway = get_gateway()
j_result_types = utils.to_jarray(gateway.jvm.TypeInformation,
[_to_java_type(i) for i in self._result_types])
j_result_type = gateway.jvm.org.apache.flink.api.java.typeutils.RowTypeInfo(j_result_types)
PythonTableFunction = gateway.jvm \
.org.apache.flink.table.functions.python.PythonTableFunction
j_table_function = PythonTableFunction(
self._name,
bytearray(serialized_func),
j_input_types,
j_result_type,
j_function_kind,
self._deterministic,
_get_python_env())
return j_table_function
def _create_delegate_function(self) -> UserDefinedFunction:
return DelegationTableFunction(self._func)
class UserDefinedAggregateFunctionWrapper(UserDefinedFunctionWrapper):
"""
Wrapper for Python user-defined aggregate function.
"""
def __init__(self, func, input_types, result_type, accumulator_type, func_type,
deterministic, name):
super(UserDefinedAggregateFunctionWrapper, self).__init__(
func, input_types, func_type, deterministic, name)
if accumulator_type is None and func_type == "general":
accumulator_type = func.get_accumulator_type()
if result_type is None:
result_type = func.get_result_type()
if not isinstance(result_type, DataType):
raise TypeError(
"Invalid returnType: returnType should be DataType but is {}".format(result_type))
from pyflink.table.types import RowType, MapType
if func_type == 'pandas' and isinstance(result_type, (RowType, MapType)):
raise TypeError(
"Invalid returnType: Pandas UDAF doesn't support DataType type {} currently"
.format(result_type))
if accumulator_type is not None and not isinstance(accumulator_type, DataType):
raise TypeError(
"Invalid accumulator_type: accumulator_type should be DataType but is {}".format(
accumulator_type))
self._result_type = result_type
self._accumulator_type = accumulator_type
    def _create_judf(self, serialized_func, j_input_types, j_function_kind):
        gateway = get_gateway()
        if self._func_type == "pandas":
            from pyflink.table.types import DataTypes
            self._accumulator_type = DataTypes.ARRAY(self._result_type)
        if j_input_types is not None:
            j_input_types = utils.to_jarray(
                gateway.jvm.DataType, [_to_java_data_type(i) for i in self._input_types])
        j_result_type = _to_java_data_type(self._result_type)
        j_accumulator_type = _to_java_data_type(self._accumulator_type)
PythonAggregateFunction = gateway.jvm \
.org.apache.flink.table.functions.python.PythonAggregateFunction
j_aggregate_function = PythonAggregateFunction(
self._name,
bytearray(serialized_func),
j_input_types,
j_result_type,
j_accumulator_type,
j_function_kind,
self._deterministic,
_get_python_env())
return j_aggregate_function
def _create_delegate_function(self) -> UserDefinedFunction:
assert self._func_type == 'pandas'
return DelegatingPandasAggregateFunction(self._func)
# TODO: support to configure the python execution environment
def _get_python_env():
gateway = get_gateway()
exec_type = gateway.jvm.org.apache.flink.table.functions.python.PythonEnv.ExecType.PROCESS
return gateway.jvm.org.apache.flink.table.functions.python.PythonEnv(exec_type)
def _create_udf(f, input_types, result_type, func_type, deterministic, name):
return UserDefinedScalarFunctionWrapper(
f, input_types, result_type, func_type, deterministic, name)
def _create_udtf(f, input_types, result_types, deterministic, name):
return UserDefinedTableFunctionWrapper(f, input_types, result_types, deterministic, name)
def _create_udaf(f, input_types, result_type, accumulator_type, func_type, deterministic, name):
return UserDefinedAggregateFunctionWrapper(
f, input_types, result_type, accumulator_type, func_type, deterministic, name)
def udf(f: Union[Callable, UserDefinedFunction, Type] = None,
input_types: Union[List[DataType], DataType] = None, result_type: DataType = None,
deterministic: bool = None, name: str = None, func_type: str = "general",
udf_type: str = None) -> Union[UserDefinedScalarFunctionWrapper, Callable]:
"""
Helper method for creating a user-defined function.
Example:
::
>>> add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT())
>>> # The input_types is optional.
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())
:param f: lambda function or user-defined function.
:param input_types: optional, the input data types.
:param result_type: the result data type.
:param deterministic: the determinism of the function's results. True if and only if a call to
this function is guaranteed to always return the same result given the
same parameters. (default True)
:param name: the function name.
    :param func_type: the type of the python function, available values: general, pandas
                      (default: general)
    :param udf_type: deprecated since 1.12, use ``func_type`` instead.
:return: UserDefinedScalarFunctionWrapper or function.
.. versionadded:: 1.10.0
"""
if udf_type:
import warnings
warnings.warn("The param udf_type is deprecated in 1.12. Use func_type instead.")
func_type = udf_type
if func_type not in ('general', 'pandas'):
raise ValueError("The func_type must be one of 'general, pandas', got %s."
% func_type)
# decorator
if f is None:
return functools.partial(_create_udf, input_types=input_types, result_type=result_type,
func_type=func_type, deterministic=deterministic,
name=name)
else:
return _create_udf(f, input_types, result_type, func_type, deterministic, name)
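# Hedged usage sketch (not part of the original module): once created via
# ``udf`` above, a scalar function is typically registered on a
# TableEnvironment and invoked from SQL. The names "add_one", "MyTable" and
# the ``t_env`` parameter are illustrative assumptions.
def _example_register_scalar_udf(t_env):
    from pyflink.table import DataTypes

    # Create the wrapper and register it under a temporary name.
    add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
    t_env.create_temporary_function("add_one", add_one)
    # Use the registered function from SQL.
    return t_env.sql_query("SELECT add_one(a) FROM MyTable")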
def udtf(f: Union[Callable, UserDefinedFunction, Type] = None,
input_types: Union[List[DataType], DataType] = None,
result_types: Union[List[DataType], DataType] = None, deterministic: bool = None,
name: str = None) -> Union[UserDefinedTableFunctionWrapper, Callable]:
"""
Helper method for creating a user-defined table function.
Example:
::
>>> # The input_types is optional.
>>> @udtf(result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()])
... def range_emit(s, e):
... for i in range(e):
... yield s, i
>>> class MultiEmit(TableFunction):
... def eval(self, i):
... return range(i)
>>> multi_emit = udtf(MultiEmit(), DataTypes.BIGINT(), DataTypes.BIGINT())
:param f: user-defined table function.
:param input_types: optional, the input data types.
:param result_types: the result data types.
:param name: the function name.
:param deterministic: the determinism of the function's results. True if and only if a call to
this function is guaranteed to always return the same result given the
same parameters. (default True)
:return: UserDefinedTableFunctionWrapper or function.
.. versionadded:: 1.11.0
"""
# decorator
if f is None:
return functools.partial(_create_udtf, input_types=input_types, result_types=result_types,
deterministic=deterministic, name=name)
else:
return _create_udtf(f, input_types, result_types, deterministic, name)
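# Hedged usage sketch (not part of the original module): a table function
# created via ``udtf`` above is usually applied with ``join_lateral`` on the
# expression DSL. The table path "MyTable" and the column names are
# illustrative assumptions.
def _example_use_table_udtf(t_env):
    from pyflink.table import DataTypes
    from pyflink.table.expressions import col

    # Split a string column into one row per word.
    split = udtf(lambda s: (w for w in s.split(" ")),
                 result_types=DataTypes.STRING())
    t = t_env.from_path("MyTable")
    return t.join_lateral(split(col("a")).alias("word"))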
def udaf(f: Union[Callable, UserDefinedFunction, Type] = None,
input_types: Union[List[DataType], DataType] = None, result_type: DataType = None,
accumulator_type: DataType = None, deterministic: bool = None, name: str = None,
func_type: str = "general") -> Union[UserDefinedAggregateFunctionWrapper, Callable]:
"""
Helper method for creating a user-defined aggregate function.
Example:
::
>>> # The input_types is optional.
>>> @udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
... def mean_udaf(v):
... return v.mean()
:param f: user-defined aggregate function.
:param input_types: optional, the input data types.
:param result_type: the result data type.
:param accumulator_type: optional, the accumulator data type.
:param deterministic: the determinism of the function's results. True if and only if a call to
this function is guaranteed to always return the same result given the
same parameters. (default True)
:param name: the function name.
    :param func_type: the type of the python function, available values: general, pandas
                      (default: general)
:return: UserDefinedAggregateFunctionWrapper or function.
.. versionadded:: 1.12.0
"""
if func_type not in ('general', 'pandas'):
raise ValueError("The func_type must be one of 'general, pandas', got %s."
% func_type)
# decorator
if f is None:
return functools.partial(_create_udaf, input_types=input_types, result_type=result_type,
accumulator_type=accumulator_type, func_type=func_type,
deterministic=deterministic, name=name)
else:
return _create_udaf(f, input_types, result_type, accumulator_type, func_type,
deterministic, name)
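# Hedged usage sketch (not part of the original module): a pandas UDAF built
# via ``udaf`` above can be used directly in a group-by aggregation. The
# table path "MyTable" and the column names are illustrative assumptions.
def _example_group_by_pandas_udaf(t_env):
    from pyflink.table import DataTypes
    from pyflink.table.expressions import col

    # Each group's column arrives as a pandas.Series; return one scalar.
    mean_udaf = udaf(lambda v: v.mean(),
                     result_type=DataTypes.FLOAT(),
                     func_type="pandas")
    t = t_env.from_path("MyTable")
    return t.group_by(col("a")).select(mean_udaf(col("b")))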
#!/usr/bin/env python
"""
Copyright (c) 2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import itertools
import logging
import os
import sys
import cocotb_test.simulator
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge
from cocotb.regression import TestFactory
from cocotbext.eth import XgmiiSource, XgmiiFrame
try:
from baser import BaseRSerdesSink
except ImportError:
# attempt import from current directory
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
try:
from baser import BaseRSerdesSink
finally:
del sys.path[0]
class TB:
def __init__(self, dut):
self.dut = dut
self.log = logging.getLogger("cocotb.tb")
self.log.setLevel(logging.DEBUG)
cocotb.start_soon(Clock(dut.clk, 6.4, units="ns").start())
self.source = XgmiiSource(dut.xgmii_txd, dut.xgmii_txc, dut.clk, dut.rst)
self.sink = BaseRSerdesSink(dut.encoded_tx_data, dut.encoded_tx_hdr, dut.clk, scramble=False)
async def reset(self):
self.dut.rst.setimmediatevalue(0)
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
        self.dut.rst.value = 1
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
        self.dut.rst.value = 0
await RisingEdge(self.dut.clk)
await RisingEdge(self.dut.clk)
async def run_test(dut, payload_lengths=None, payload_data=None, ifg=12, enable_dic=True,
force_offset_start=False):
tb = TB(dut)
tb.source.ifg = ifg
tb.source.enable_dic = enable_dic
tb.source.force_offset_start = force_offset_start
await tb.reset()
test_frames = [payload_data(x) for x in payload_lengths()]
for test_data in test_frames:
test_frame = XgmiiFrame.from_payload(test_data)
await tb.source.send(test_frame)
for test_data in test_frames:
rx_frame = await tb.sink.recv()
assert rx_frame.get_payload() == test_data
assert rx_frame.check_fcs()
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
async def run_test_alignment(dut, payload_data=None, ifg=12, enable_dic=True,
force_offset_start=False):
tb = TB(dut)
byte_lanes = tb.source.byte_lanes
tb.source.ifg = ifg
tb.source.enable_dic = enable_dic
tb.source.force_offset_start = force_offset_start
for length in range(60, 92):
await tb.reset()
        test_frames = [payload_data(length) for _ in range(10)]
start_lane = []
for test_data in test_frames:
test_frame = XgmiiFrame.from_payload(test_data)
await tb.source.send(test_frame)
for test_data in test_frames:
rx_frame = await tb.sink.recv()
assert rx_frame.get_payload() == test_data
assert rx_frame.check_fcs()
assert rx_frame.ctrl is None
start_lane.append(rx_frame.start_lane)
tb.log.info("length: %d", length)
tb.log.info("start_lane: %s", start_lane)
start_lane_ref = []
# compute expected starting lanes
lane = 0
deficit_idle_count = 0
for test_data in test_frames:
if ifg == 0:
lane = 0
if force_offset_start and byte_lanes > 4:
lane = 4
start_lane_ref.append(lane)
lane = (lane + len(test_data)+4+ifg) % byte_lanes
if enable_dic:
offset = lane % 4
if deficit_idle_count+offset >= 4:
offset += 4
lane = (lane - offset) % byte_lanes
deficit_idle_count = (deficit_idle_count + offset) % 4
else:
offset = lane % 4
if offset > 0:
offset += 4
lane = (lane - offset) % byte_lanes
tb.log.info("start_lane_ref: %s", start_lane_ref)
assert start_lane_ref == start_lane
await RisingEdge(dut.clk)
assert tb.sink.empty()
await RisingEdge(dut.clk)
await RisingEdge(dut.clk)
def size_list():
return list(range(60, 128)) + [512, 1514, 9214] + [60]*10
def incrementing_payload(length):
return bytearray(itertools.islice(itertools.cycle(range(256)), length))
def cycle_en():
return itertools.cycle([0, 0, 0, 1])
if cocotb.SIM_NAME:
factory = TestFactory(run_test)
factory.add_option("payload_lengths", [size_list])
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("ifg", [12])
factory.add_option("enable_dic", [True, False])
factory.add_option("force_offset_start", [False, True])
factory.generate_tests()
factory = TestFactory(run_test_alignment)
factory.add_option("payload_data", [incrementing_payload])
factory.add_option("ifg", [12])
factory.add_option("enable_dic", [True, False])
factory.add_option("force_offset_start", [False, True])
factory.generate_tests()
# cocotb-test
tests_dir = os.path.abspath(os.path.dirname(__file__))
rtl_dir = os.path.abspath(os.path.join(tests_dir, '..', '..', 'rtl'))
lib_dir = os.path.abspath(os.path.join(rtl_dir, '..', 'lib'))
axis_rtl_dir = os.path.abspath(os.path.join(lib_dir, 'axis', 'rtl'))
def test_xgmii_baser_enc_64(request):
dut = "xgmii_baser_enc_64"
module = os.path.splitext(os.path.basename(__file__))[0]
toplevel = dut
verilog_sources = [
os.path.join(rtl_dir, f"{dut}.v"),
]
parameters = {}
parameters['DATA_WIDTH'] = 64
parameters['CTRL_WIDTH'] = parameters['DATA_WIDTH'] // 8
parameters['HDR_WIDTH'] = 2
extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}
sim_build = os.path.join(tests_dir, "sim_build",
request.node.name.replace('[', '-').replace(']', ''))
cocotb_test.simulator.run(
python_search=[tests_dir],
verilog_sources=verilog_sources,
toplevel=toplevel,
module=module,
parameters=parameters,
sim_build=sim_build,
extra_env=extra_env,
)
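# Hedged usage note (not part of the original file): the pytest entry point
# above is typically run from this tests directory with the simulator chosen
# via cocotb-test's SIM environment variable, for example:
#
#     SIM=icarus pytest -k test_xgmii_baser_enc_64
#
# "icarus" is an illustrative choice; any simulator supported by cocotb-test
# on the local machine works.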
# -*- coding: utf-8 -*-
"""
Tests of the neo.core.recordingchannel.RecordingChannel class
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division, print_function
try:
import unittest2 as unittest
except ImportError:
import unittest
import numpy as np
import quantities as pq
try:
from IPython.lib.pretty import pretty
except ImportError:
HAVE_IPYTHON = False
else:
HAVE_IPYTHON = True
from neo.core.recordingchannel import RecordingChannel
from neo.core.container import filterdata
from neo.core import IrregularlySampledSignal, RecordingChannelGroup, Unit
from neo.test.tools import (assert_neo_object_is_compliant,
assert_arrays_equal,
assert_same_sub_schema)
from neo.test.generate_datasets import (fake_neo, get_fake_value,
get_fake_values, get_annotations,
clone_object, TEST_ANNOTATIONS)
class Test__generate_datasets(unittest.TestCase):
def setUp(self):
np.random.seed(0)
self.annotations = dict([(str(x), TEST_ANNOTATIONS[x]) for x in
range(len(TEST_ANNOTATIONS))])
def test__get_fake_values(self):
self.annotations['seed'] = 0
index = get_fake_value('index', int, seed=0)
coordinate = get_fake_value('coordinate', pq.Quantity, seed=1, dim=1)
name = get_fake_value('name', str, seed=2, obj=RecordingChannel)
description = get_fake_value('description', str, seed=3,
obj='RecordingChannel')
file_origin = get_fake_value('file_origin', str)
attrs1 = {'index': index,
'name': name,
'description': description,
'file_origin': file_origin}
attrs2 = attrs1.copy()
attrs2.update(self.annotations)
res11 = get_fake_values(RecordingChannel, annotate=False, seed=0)
res12 = get_fake_values('RecordingChannel', annotate=False, seed=0)
res21 = get_fake_values(RecordingChannel, annotate=True, seed=0)
res22 = get_fake_values('RecordingChannel', annotate=True, seed=0)
assert_arrays_equal(res11.pop('coordinate'), coordinate)
assert_arrays_equal(res12.pop('coordinate'), coordinate)
assert_arrays_equal(res21.pop('coordinate'), coordinate)
assert_arrays_equal(res22.pop('coordinate'), coordinate)
self.assertEqual(res11, attrs1)
self.assertEqual(res12, attrs1)
self.assertEqual(res21, attrs2)
self.assertEqual(res22, attrs2)
def test__fake_neo__cascade(self):
self.annotations['seed'] = None
obj_type = RecordingChannel
cascade = True
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, RecordingChannel))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.analogsignals), 1)
self.assertEqual(len(res.irregularlysampledsignals), 1)
for child in res.children_recur:
del child.annotations['i']
del child.annotations['j']
self.assertEqual(res.analogsignals[0].annotations,
self.annotations)
self.assertEqual(res.irregularlysampledsignals[0].annotations,
self.annotations)
def test__fake_neo__nocascade(self):
self.annotations['seed'] = None
obj_type = 'RecordingChannel'
cascade = False
res = fake_neo(obj_type=obj_type, cascade=cascade)
self.assertTrue(isinstance(res, RecordingChannel))
assert_neo_object_is_compliant(res)
self.assertEqual(res.annotations, self.annotations)
self.assertEqual(len(res.analogsignals), 0)
self.assertEqual(len(res.irregularlysampledsignals), 0)
class TestRecordingChannel(unittest.TestCase):
def setUp(self):
self.nchildren = 2
self.seed1 = 0
self.seed2 = 10000
self.rchan1 = fake_neo(RecordingChannel,
seed=self.seed1, n=self.nchildren)
self.rchan2 = fake_neo(RecordingChannel,
seed=self.seed2, n=self.nchildren)
self.targobj = self.rchan1
self.sigs1 = self.rchan1.analogsignals
self.sigs2 = self.rchan2.analogsignals
self.irsigs1 = self.rchan1.irregularlysampledsignals
self.irsigs2 = self.rchan2.irregularlysampledsignals
self.sigs1a = clone_object(self.sigs1)
self.irsigs1a = clone_object(self.irsigs1)
def check_creation(self, rchan):
assert_neo_object_is_compliant(rchan)
seed = rchan.annotations['seed']
targ0 = get_fake_value('index', int, seed=seed+0, obj=RecordingChannel)
self.assertEqual(rchan.index, targ0)
targ1 = get_fake_value('coordinate', pq.Quantity, dim=1, seed=seed+1)
assert_arrays_equal(rchan.coordinate, targ1)
targ2 = get_fake_value('name', str, seed=seed+2, obj=RecordingChannel)
self.assertEqual(rchan.name, targ2)
targ3 = get_fake_value('description', str,
seed=seed+3, obj=RecordingChannel)
self.assertEqual(rchan.description, targ3)
targ4 = get_fake_value('file_origin', str)
self.assertEqual(rchan.file_origin, targ4)
targ5 = get_annotations()
targ5['seed'] = seed
self.assertEqual(rchan.annotations, targ5)
self.assertTrue(hasattr(rchan, 'analogsignals'))
self.assertTrue(hasattr(rchan, 'irregularlysampledsignals'))
self.assertEqual(len(rchan.analogsignals), self.nchildren)
self.assertEqual(len(rchan.irregularlysampledsignals), self.nchildren)
def test__creation(self):
self.check_creation(self.rchan1)
self.check_creation(self.rchan2)
def test__merge(self):
rchan1a = fake_neo(RecordingChannel, seed=self.seed1, n=self.nchildren)
assert_same_sub_schema(self.rchan1, rchan1a)
rchan1a.annotate(seed=self.seed2)
rchan1a.analogsignals.append(self.sigs2[0])
rchan1a.merge(self.rchan2)
self.check_creation(self.rchan2)
assert_same_sub_schema(self.sigs1a + self.sigs2,
rchan1a.analogsignals)
assert_same_sub_schema(self.irsigs1a + self.irsigs2,
rchan1a.irregularlysampledsignals)
def test__children(self):
rcg1 = RecordingChannelGroup(name='rcg1')
rcg2 = RecordingChannelGroup(name='rcg2')
rcg1.recordingchannels = [self.rchan1]
rcg2.recordingchannels = [self.rchan1]
rcg2.create_relationship()
rcg1.create_relationship()
assert_neo_object_is_compliant(self.rchan1)
assert_neo_object_is_compliant(rcg1)
assert_neo_object_is_compliant(rcg2)
self.assertEqual(self.rchan1._container_child_objects, ())
self.assertEqual(self.rchan1._data_child_objects,
('AnalogSignal', 'IrregularlySampledSignal'))
self.assertEqual(self.rchan1._single_parent_objects, ())
self.assertEqual(self.rchan1._multi_child_objects, ())
self.assertEqual(self.rchan1._multi_parent_objects,
('RecordingChannelGroup',))
self.assertEqual(self.rchan1._child_properties, ())
self.assertEqual(self.rchan1._single_child_objects,
('AnalogSignal', 'IrregularlySampledSignal'))
self.assertEqual(self.rchan1._container_child_containers, ())
self.assertEqual(self.rchan1._data_child_containers,
('analogsignals', 'irregularlysampledsignals',))
self.assertEqual(self.rchan1._single_child_containers,
('analogsignals', 'irregularlysampledsignals',))
self.assertEqual(self.rchan1._single_parent_containers, ())
self.assertEqual(self.rchan1._multi_child_containers, ())
self.assertEqual(self.rchan1._multi_parent_containers,
('recordingchannelgroups',))
self.assertEqual(self.rchan1._child_objects,
('AnalogSignal', 'IrregularlySampledSignal'))
self.assertEqual(self.rchan1._child_containers,
('analogsignals', 'irregularlysampledsignals',))
self.assertEqual(self.rchan1._parent_objects,
('RecordingChannelGroup',))
self.assertEqual(self.rchan1._parent_containers,
('recordingchannelgroups',))
self.assertEqual(len(self.rchan1._single_children), self.nchildren*2)
self.assertEqual(len(self.rchan1._multi_children), 0)
self.assertEqual(len(self.rchan1.data_children), self.nchildren*2)
self.assertEqual(len(self.rchan1.data_children_recur),
self.nchildren*2)
self.assertEqual(len(self.rchan1.container_children), 0)
self.assertEqual(len(self.rchan1.container_children_recur), 0)
self.assertEqual(len(self.rchan1.children), self.nchildren*2)
self.assertEqual(len(self.rchan1.children_recur), self.nchildren*2)
self.assertEqual(self.rchan1._multi_children, ())
self.assertEqual(self.rchan1.container_children, ())
self.assertEqual(self.rchan1.container_children_recur, ())
assert_same_sub_schema(list(self.rchan1._single_children),
self.sigs1a+self.irsigs1a)
assert_same_sub_schema(list(self.rchan1.data_children),
self.sigs1a+self.irsigs1a)
assert_same_sub_schema(list(self.rchan1.data_children_recur),
self.sigs1a+self.irsigs1a)
assert_same_sub_schema(list(self.rchan1.children),
self.sigs1a+self.irsigs1a)
assert_same_sub_schema(list(self.rchan1.children_recur),
self.sigs1a+self.irsigs1a)
self.assertEqual(len(self.rchan1.parents), 2)
self.assertEqual(self.rchan1.parents[0].name, 'rcg2')
self.assertEqual(self.rchan1.parents[1].name, 'rcg1')
def test__size(self):
targ = {'analogsignals': self.nchildren,
'irregularlysampledsignals': self.nchildren}
self.assertEqual(self.targobj.size, targ)
def test__filter_none(self):
targ = []
res1 = self.targobj.filter()
res2 = self.targobj.filter({})
res3 = self.targobj.filter([])
res4 = self.targobj.filter([{}])
res5 = self.targobj.filter([{}, {}])
res6 = self.targobj.filter([{}, {}])
res7 = self.targobj.filter(targdict={})
res8 = self.targobj.filter(targdict=[])
res9 = self.targobj.filter(targdict=[{}])
res10 = self.targobj.filter(targdict=[{}, {}])
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
def test__filter_annotation_single(self):
targ = [self.sigs1a[1], self.irsigs1a[1]]
res0 = self.targobj.filter(j=1)
res1 = self.targobj.filter({'j': 1})
res2 = self.targobj.filter(targdict={'j': 1})
res3 = self.targobj.filter([{'j': 1}])
res4 = self.targobj.filter(targdict=[{'j': 1}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_single_annotation_nores(self):
targ = []
res0 = self.targobj.filter(j=5)
res1 = self.targobj.filter({'j': 5})
res2 = self.targobj.filter(targdict={'j': 5})
res3 = self.targobj.filter([{'j': 5}])
res4 = self.targobj.filter(targdict=[{'j': 5}])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_attribute_single(self):
targ = [self.sigs1a[0]]
name = self.sigs1a[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_attribute_single_nores(self):
targ = []
name = self.sigs2[0].name
res0 = self.targobj.filter(name=name)
res1 = self.targobj.filter({'name': name})
res2 = self.targobj.filter(targdict={'name': name})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi(self):
targ = [self.sigs1a[1], self.irsigs1a[1], self.sigs1a[0]]
name = self.sigs1a[0].name
res0 = self.targobj.filter(name=name, j=1)
res1 = self.targobj.filter({'name': name, 'j': 1})
res2 = self.targobj.filter(targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_multi_nores(self):
targ = []
name0 = self.sigs2[0].name
res0 = self.targobj.filter([{'j': 5}, {}])
res1 = self.targobj.filter({}, j=0)
res2 = self.targobj.filter([{}], i=0)
res3 = self.targobj.filter({'name': name0}, j=1)
res4 = self.targobj.filter(targdict={'name': name0}, j=1)
res5 = self.targobj.filter(name=name0, targdict={'j': 1})
res6 = self.targobj.filter(name=name0, j=5)
res7 = self.targobj.filter({'name': name0, 'j': 5})
res8 = self.targobj.filter(targdict={'name': name0, 'j': 5})
res9 = self.targobj.filter({'name': name0}, j=5)
res10 = self.targobj.filter(targdict={'name': name0}, j=5)
res11 = self.targobj.filter(name=name0, targdict={'j': 5})
res12 = self.targobj.filter({'name': name0}, j=5)
res13 = self.targobj.filter(targdict={'name': name0}, j=5)
res14 = self.targobj.filter(name=name0, targdict={'j': 5})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filter_multi_partres(self):
targ = [self.sigs1a[0]]
name = self.sigs1a[0].name
res0 = self.targobj.filter(name=name, j=5)
res1 = self.targobj.filter({'name': name, 'j': 5})
res2 = self.targobj.filter(targdict={'name': name, 'j': 5})
res3 = self.targobj.filter([{'j': 0}, {'i': 0}])
res4 = self.targobj.filter({'j': 0}, i=0)
res5 = self.targobj.filter([{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
def test__filter_single_annotation_obj_single(self):
targ = [self.irsigs1a[1]]
res0 = self.targobj.filter(j=1, objects='IrregularlySampledSignal')
res1 = self.targobj.filter(j=1, objects=IrregularlySampledSignal)
res2 = self.targobj.filter(j=1, objects=['IrregularlySampledSignal'])
res3 = self.targobj.filter(j=1, objects=[IrregularlySampledSignal])
res4 = self.targobj.filter(j=1, objects=[IrregularlySampledSignal,
Unit])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
def test__filter_single_annotation_obj_multi(self):
targ = [self.sigs1a[1], self.irsigs1a[1]]
res0 = self.targobj.filter(j=1, objects=['AnalogSignal',
IrregularlySampledSignal])
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_obj_none(self):
targ = []
res0 = self.targobj.filter(j=1, objects=Unit)
res1 = self.targobj.filter(j=1, objects='Unit')
res2 = self.targobj.filter(j=1, objects=[])
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filter_single_annotation_norecur(self):
targ = [self.sigs1a[1], self.irsigs1a[1]]
res0 = self.targobj.filter(j=1, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_norecur(self):
targ = [self.sigs1a[0]]
res0 = self.targobj.filter(name=self.sigs1a[0].name, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata(self):
targ = []
res0 = self.targobj.filter(j=1, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata(self):
targ = []
res0 = self.targobj.filter(name=self.sigs1a[0].name, data=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(j=1,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigs1a[0].name,
data=False, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container(self):
targ = [self.sigs1a[1], self.irsigs1a[1]]
res0 = self.targobj.filter(j=1, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container(self):
targ = [self.sigs1a[0]]
res0 = self.targobj.filter(name=self.sigs1a[0].name, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_container_norecur(self):
targ = [self.sigs1a[1], self.irsigs1a[1]]
res0 = self.targobj.filter(j=1, container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_container_norecur(self):
targ = [self.sigs1a[0]]
res0 = self.targobj.filter(name=self.sigs1a[0].name,
container=True, recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container(self):
targ = []
res0 = self.targobj.filter(j=1,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container(self):
targ = []
res0 = self.targobj.filter(name=self.sigs1a[0].name,
data=False, container=True)
assert_same_sub_schema(res0, targ)
def test__filter_single_annotation_nodata_container_norecur(self):
targ = []
res0 = self.targobj.filter(j=1,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filter_single_attribute_nodata_container_norecur(self):
targ = []
res0 = self.targobj.filter(name=self.sigs1a[0].name,
data=False, container=True,
recursive=False)
assert_same_sub_schema(res0, targ)
def test__filterdata_multi(self):
data = self.targobj.children_recur
targ = [self.sigs1a[1], self.irsigs1a[1], self.sigs1a[0]]
name = self.sigs1a[0].name
res0 = filterdata(data, name=name, j=1)
res1 = filterdata(data, {'name': name, 'j': 1})
res2 = filterdata(data, targdict={'name': name, 'j': 1})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
def test__filterdata_multi_nores(self):
data = self.targobj.children_recur
targ = []
name1 = self.sigs1a[0].name
name2 = self.sigs2[0].name
res0 = filterdata(data, [{'j': 0}, {}])
res1 = filterdata(data, {}, i=0)
res2 = filterdata(data, [{}], i=0)
res3 = filterdata(data, name=name1, targdict={'j': 1})
res4 = filterdata(data, {'name': name1}, j=1)
res5 = filterdata(data, targdict={'name': name1}, j=1)
res6 = filterdata(data, name=name2, j=5)
res7 = filterdata(data, {'name': name2, 'j': 5})
res8 = filterdata(data, targdict={'name': name2, 'j': 5})
res9 = filterdata(data, {'name': name2}, j=5)
res10 = filterdata(data, targdict={'name': name2}, j=5)
res11 = filterdata(data, name=name2, targdict={'j': 5})
res12 = filterdata(data, {'name': name1}, j=5)
res13 = filterdata(data, targdict={'name': name1}, j=5)
res14 = filterdata(data, name=name1, targdict={'j': 5})
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
assert_same_sub_schema(res6, targ)
assert_same_sub_schema(res7, targ)
assert_same_sub_schema(res8, targ)
assert_same_sub_schema(res9, targ)
assert_same_sub_schema(res10, targ)
assert_same_sub_schema(res11, targ)
assert_same_sub_schema(res12, targ)
assert_same_sub_schema(res13, targ)
assert_same_sub_schema(res14, targ)
def test__filterdata_multi_partres(self):
data = self.targobj.children_recur
targ = [self.sigs1a[0]]
name = self.sigs1a[0].name
res0 = filterdata(data, name=name, j=5)
res1 = filterdata(data, {'name': name, 'j': 5})
res2 = filterdata(data, targdict={'name': name, 'j': 5})
res3 = filterdata(data, [{'j': 0}, {'i': 0}])
res4 = filterdata(data, {'j': 0}, i=0)
res5 = filterdata(data, [{'j': 0}], i=0)
assert_same_sub_schema(res0, targ)
assert_same_sub_schema(res1, targ)
assert_same_sub_schema(res2, targ)
assert_same_sub_schema(res3, targ)
assert_same_sub_schema(res4, targ)
assert_same_sub_schema(res5, targ)
@unittest.skipUnless(HAVE_IPYTHON, "requires IPython")
def test__pretty(self):
ann = get_annotations()
ann['seed'] = self.seed1
        ann = pretty(ann).replace('\n ', '\n  ')
res = pretty(self.rchan1)
targ = ("RecordingChannel with " +
("%s analogsignals, %s irregularlysampledsignals\n" %
(len(self.sigs1), len(self.irsigs1))) +
("name: '%s'\ndescription: '%s'\n" % (self.rchan1.name,
self.rchan1.description)
) +
("annotations: %s" % ann))
self.assertEqual(res, targ)
if __name__ == "__main__":
unittest.main()
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import urlresolvers
from django.template import defaultfilters as d_filters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.database_backups \
import tables as backup_tables
ACTIVE_STATES = ("ACTIVE",)
class TerminateInstance(tables.BatchAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Terminate Instance",
u"Terminate Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled termination of Instance",
u"Scheduled termination of Instances",
count
)
name = "terminate"
classes = ("btn-danger", )
icon = "off"
def action(self, request, obj_id):
api.trove.instance_delete(request, obj_id)
class RestartInstance(tables.BatchAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Restart Instance",
u"Restart Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Restarted Instance",
u"Restarted Instances",
count
)
name = "restart"
classes = ('btn-danger', 'btn-reboot')
def allowed(self, request, instance=None):
        return (instance.status in ACTIVE_STATES
                or instance.status == 'SHUTDOWN')
def action(self, request, obj_id):
api.trove.instance_restart(request, obj_id)
class DeleteUser(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete User",
u"Delete Users",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted User",
u"Deleted Users",
count
)
def delete(self, request, obj_id):
datum = self.table.get_object_by_id(obj_id)
try:
api.trove.user_delete(request, datum.instance.id, datum.name)
except Exception:
msg = _('Error deleting database user.')
exceptions.handle(request, msg)
class DeleteDatabase(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Database",
u"Delete Databases",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Database",
u"Deleted Databases",
count
)
def delete(self, request, obj_id):
datum = self.table.get_object_by_id(obj_id)
try:
api.trove.database_delete(request, datum.instance.id, datum.name)
except Exception:
msg = _('Error deleting database on instance.')
exceptions.handle(request, msg)
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:project:databases:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
class CreateBackup(tables.LinkAction):
name = "backup"
verbose_name = _("Create Backup")
url = "horizon:project:database_backups:create"
classes = ("ajax-modal",)
icon = "camera"
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES and
request.user.has_perm('openstack.services.object-store'))
    def get_link_url(self, datum):
        url = urlresolvers.reverse(self.url)
        return url + "?instance=%s" % datum.id
class ResizeVolume(tables.LinkAction):
name = "resize_volume"
verbose_name = _("Resize Volume")
url = "horizon:project:databases:resize_volume"
classes = ("ajax-modal", "btn-resize")
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class ResizeInstance(tables.LinkAction):
name = "resize_instance"
verbose_name = _("Resize Instance")
url = "horizon:project:databases:resize_instance"
classes = ("ajax-modal", "btn-resize")
def allowed(self, request, instance=None):
        return (instance.status in ACTIVE_STATES
                or instance.status == 'SHUTOFF')
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
instance = api.trove.instance_get(request, instance_id)
try:
flavor_id = instance.flavor['id']
instance.full_flavor = api.trove.flavor_get(request, flavor_id)
except Exception:
pass
instance.host = get_host(instance)
return instance
def get_datastore(instance):
if hasattr(instance, "datastore"):
return instance.datastore["type"]
return _("Not available")
def get_datastore_version(instance):
if hasattr(instance, "datastore"):
return instance.datastore["version"]
return _("Not available")
def get_host(instance):
if hasattr(instance, "hostname"):
return instance.hostname
elif hasattr(instance, "ip") and instance.ip:
return instance.ip[0]
return _("Not Assigned")
def get_size(instance):
if hasattr(instance, "full_flavor"):
size_string = _("%(name)s | %(RAM)s RAM")
vals = {'name': instance.full_flavor.name,
'RAM': sizeformat.mb_float_format(instance.full_flavor.ram)}
return size_string % vals
return _("Not available")
def get_volume_size(instance):
if hasattr(instance, "volume"):
return sizeformat.diskgbformat(instance.volume.get("size"))
return _("Not available")
def get_databases(user):
if hasattr(user, "access"):
databases = [db.name for db in user.access]
databases.sort()
return ', '.join(databases)
return _("-")
class InstancesTable(tables.DataTable):
STATUS_CHOICES = (
("ACTIVE", True),
("BLOCKED", True),
("BUILD", None),
("FAILED", False),
("REBOOT", None),
("RESIZE", None),
("BACKUP", None),
("SHUTDOWN", False),
("ERROR", False),
("RESTART_REQUIRED", None),
)
name = tables.Column("name",
link=("horizon:project:databases:detail"),
verbose_name=_("Instance Name"))
datastore = tables.Column(get_datastore,
verbose_name=_("Datastore"))
datastore_version = tables.Column(get_datastore_version,
verbose_name=_("Datastore Version"))
host = tables.Column(get_host, verbose_name=_("Host"))
size = tables.Column(get_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
volume = tables.Column(get_volume_size,
verbose_name=_("Volume Size"),
attrs={'data-type': 'size'})
status = tables.Column("status",
filters=(d_filters.title,
filters.replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES)
class Meta:
name = "databases"
verbose_name = _("Instances")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (LaunchLink, TerminateInstance)
row_actions = (CreateBackup,
ResizeVolume,
ResizeInstance,
RestartInstance,
TerminateInstance)
class UsersTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("User Name"))
host = tables.Column("host", verbose_name=_("Allowed Host"))
databases = tables.Column(get_databases, verbose_name=_("Databases"))
class Meta:
name = "users"
verbose_name = _("Users")
table_actions = [DeleteUser]
row_actions = [DeleteUser]
def get_object_id(self, datum):
return datum.name
class DatabaseTable(tables.DataTable):
name = tables.Column("name", verbose_name=_("Database Name"))
class Meta:
name = "databases"
verbose_name = _("Databases")
table_actions = [DeleteDatabase]
row_actions = [DeleteDatabase]
def get_object_id(self, datum):
return datum.name
def is_incremental(obj):
return hasattr(obj, 'parent_id') and obj.parent_id is not None
class InstanceBackupsTable(tables.DataTable):
name = tables.Column("name",
link=("horizon:project:database_backups:detail"),
verbose_name=_("Name"))
created = tables.Column("created", verbose_name=_("Created"),
filters=[filters.parse_isotime])
location = tables.Column(lambda obj: _("Download"),
link=lambda obj: obj.locationRef,
verbose_name=_("Backup File"))
incremental = tables.Column(is_incremental,
verbose_name=_("Incremental"),
filters=(d_filters.yesno,
d_filters.capfirst))
status = tables.Column("status",
filters=(d_filters.title,
filters.replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=backup_tables.STATUS_CHOICES)
class Meta:
name = "backups"
verbose_name = _("Backups")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (backup_tables.LaunchLink, backup_tables.DeleteBackup)
row_actions = (backup_tables.RestoreLink, backup_tables.DeleteBackup)
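# Hedged usage sketch (not part of the original module): tables defined above
# are normally rendered through a Horizon DataTableView in the dashboard's
# views.py. The template path and the trove API call below are illustrative
# assumptions.
class _ExampleInstancesIndexView(tables.DataTableView):
    table_class = InstancesTable
    template_name = 'project/databases/index.html'

    def get_data(self):
        # Fetch the instances to display; errors would normally be routed
        # through horizon.exceptions.handle.
        return api.trove.instance_list(self.request)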